diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 263b44fd..460a885b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -18,10 +18,10 @@ jobs:
     name: Build
     runs-on: ubuntu-latest
     steps:
-    - name: Set up Go 1.20
+    - name: Set up Go 1.21
       uses: actions/setup-go@v1
       with:
-        go-version: '1.20'
+        go-version: '1.21'
       id: go
 
     - uses: actions/checkout@v1
@@ -44,4 +44,3 @@ jobs:
     - name: Run checks
       run: |
         make ci
-        make gen-crd-protos
diff --git a/.github/workflows/update-crds.yml b/.github/workflows/update-crds.yml
index a25c49cd..054bb5e5 100644
--- a/.github/workflows/update-crds.yml
+++ b/.github/workflows/update-crds.yml
@@ -15,10 +15,10 @@ jobs:
     name: Build
     runs-on: ubuntu-latest
     steps:
-    - name: Set up Go 1.20
+    - name: Set up Go 1.21
       uses: actions/setup-go@v1
       with:
-        go-version: '1.20'
+        go-version: '1.21'
       id: go
 
     - uses: actions/checkout@v1
diff --git a/Makefile b/Makefile
index 94396c92..8cc79ab3 100644
--- a/Makefile
+++ b/Makefile
@@ -62,7 +62,7 @@ ARCH := $(if $(GOARCH),$(GOARCH),$(shell go env GOARCH))
 BASEIMAGE_PROD ?= gcr.io/distroless/static-debian11
 BASEIMAGE_DBG ?= debian:bullseye
 
-GO_VERSION ?= 1.20
+GO_VERSION ?= 1.21
 BUILD_IMAGE ?= ghcr.io/appscode/golang-dev:$(GO_VERSION)
 
 OUTBIN = bin/$(BIN)-$(OS)-$(ARCH)
@@ -311,7 +311,7 @@ unit-tests: $(BUILD_DIRS)
     ./hack/test.sh $(SRC_PKGS) \
     "
 
-ADDTL_LINTERS := goconst,gofmt,goimports,unparam
+ADDTL_LINTERS := gofmt,goimports,unparam
 
 .PHONY: lint
 lint: $(BUILD_DIRS)
diff --git a/apis/cloud/constants.go b/apis/cloud/constants.go
index 44baba26..173520b2 100644
--- a/apis/cloud/constants.go
+++ b/apis/cloud/constants.go
@@ -26,7 +26,7 @@ const (
   LabelCredentialType = "byte.builders/cluster-credential-type"
   LabelCredentialOwnerID = "byte.builders/cluster-credential-owner-id"
 
-  KeyCloudProvider = "cloud.bytebuilders.dev/provider"
+  KeyCloudProvider = "cloud.appscode.com/provider"
 )
 
 const (
diff --git a/apis/cloud/register.go b/apis/cloud/register.go
index 14b387a4..f47f9b14 100644
--- a/apis/cloud/register.go
+++ b/apis/cloud/register.go
@@ -17,4 +17,4 @@ limitations under the License.
 package cloud
 
 // GroupName is the group name use in this package
-const GroupName = "cloud.bytebuilders.dev"
+const GroupName = "cloud.appscode.com"
diff --git a/apis/cloud/v1alpha1/doc.go b/apis/cloud/v1alpha1/doc.go
index 6bc09456..92e881d2 100644
--- a/apis/cloud/v1alpha1/doc.go
+++ b/apis/cloud/v1alpha1/doc.go
@@ -21,5 +21,5 @@ limitations under the License.
 // +k8s:openapi-gen=true
 // +k8s:defaulter-gen=TypeMeta
 
-// +groupName=cloud.bytebuilders.dev
+// +groupName=cloud.appscode.com
 package v1alpha1
diff --git a/apis/cloud/v1alpha1/generated.proto b/apis/cloud/v1alpha1/generated.proto
deleted file mode 100644
index d409f2ad..00000000
--- a/apis/cloud/v1alpha1/generated.proto
+++ /dev/null
@@ -1,266 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-
-// This file was autogenerated by go-to-protobuf. Do not edit it manually!
-
-syntax = "proto2";
-
-package go.bytebuilders.dev.resource_model.apis.cloud.v1alpha1;
-
-import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
-import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
-
-// Package-wide variables from generator "generated".
-option go_package = "go.bytebuilders.dev/resource-model/apis/cloud/v1alpha1";
-
-message AWSCredential {
-  optional string accessKeyID = 1;
-
-  optional string secretAccessKey = 2;
-}
-
-message AzureCredential {
-  optional string tenantID = 1;
-
-  optional string subscriptionID = 2;
-
-  optional string clientID = 3;
-
-  optional string clientSecret = 4;
-}
-
-message AzureStorageCredential {
-  optional string account = 1;
-
-  optional string key = 2;
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=cloudproviders,singular=cloudprovider,scope=Cluster,categories={cloud,appscode}
-message CloudProvider {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  optional CloudProviderSpec spec = 2;
-}
-
-// CloudProviderList contains a list of CloudProvider
-message CloudProviderList {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  repeated CloudProvider items = 2;
-}
-
-// CloudProviderSpec defines the desired state of CloudProvider
-message CloudProviderSpec {
-  repeated Region regions = 1;
-
-  repeated MachineType machineTypes = 2;
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=credentials,singular=credential,shortName=cred,scope=Cluster,categories={kubernetes,resource-model,appscode}
-// +kubebuilder:subresource:status
-message Credential {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  optional CredentialSpec spec = 2;
-
-  optional CredentialStatus status = 3;
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:object:root=true
-message CredentialList {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  repeated Credential items = 2;
-}
-
-message CredentialSpec {
-  optional string name = 1;
-
-  optional string type = 2;
-
-  optional int64 ownerID = 3;
-
-  // +optional
-  optional AWSCredential aws = 4;
-
-  // +optional
-  optional AzureCredential azure = 5;
-
-  // +optional
-  optional AzureStorageCredential azureStorage = 6;
-
-  // +optional
-  optional DigitalOceanCredential digitalocean = 7;
-
-  // +optional
-  optional GoogleCloudCredential googleCloud = 8;
-
-  // +optional
-  optional GoogleOAuthCredential googleOAuth = 9;
-
-  // +optional
-  optional LinodeCredential linode = 10;
-
-  // +optional
-  optional PacketCredential packet = 11;
-
-  // +optional
-  optional RancherCredential rancher = 12;
-
-  // +optional
-  optional ScalewayCredential scaleway = 13;
-
-  // +optional
-  optional SwiftCredential swift = 14;
-
-  // +optional
-  optional VultrCredential vultr = 15;
-}
-
-message CredentialStatus {
-  // ObservedGeneration is the most recent generation observed for this resource. It corresponds to the
-  // resource's generation, which is updated on mutation by the API Server.
-  // +optional
-  optional int64 observedGeneration = 1;
-}
-
-message DigitalOceanCredential {
-  optional string token = 1;
-}
-
-message GoogleCloudCredential {
-  optional string projectID = 1;
-
-  optional string serviceAccount = 2;
-}
-
-message GoogleOAuthCredential {
-  optional string clientID = 1;
-
-  optional string clientSecret = 2;
-
-  optional string accessToken = 3;
-
-  // +optional
-  optional string refreshToken = 4;
-
-  // +optional
-  repeated string scopes = 5;
-
-  // +optional
-  optional int64 expiry = 6;
-}
-
-message LinodeCredential {
-  optional string token = 1;
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=machinetypes,singular=machinetype,scope=Cluster,categories={cloud,appscode}
-// +kubebuilder:printcolumn:name="SKU",type="string",JSONPath=".spec.sku"
-// +kubebuilder:printcolumn:name="CPU",type="string",JSONPath=".spec.cpu"
-// +kubebuilder:printcolumn:name="RAM",type="string",JSONPath=".spec.ram"
-message MachineType {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  optional MachineTypeSpec spec = 2;
-}
-
-// MachineTypeList contains a list of MachineType
-message MachineTypeList {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  repeated MachineType items = 2;
-}
-
-// MachineTypeSpec defines the desired state of MachineType
-message MachineTypeSpec {
-  optional string sku = 1;
-
-  optional string description = 2;
-
-  optional string category = 3;
-
-  optional k8s.io.apimachinery.pkg.api.resource.Quantity cpu = 4;
-
-  optional k8s.io.apimachinery.pkg.api.resource.Quantity ram = 5;
-
-  optional k8s.io.apimachinery.pkg.api.resource.Quantity disk = 6;
-
-  repeated string regions = 7;
-
-  repeated string zones = 8;
-
-  optional bool deprecated = 9;
-}
-
-message PacketCredential {
-  optional string projectID = 1;
-
-  optional string apiKey = 2;
-}
-
-message RancherCredential {
-  optional string accessKeyID = 1;
-
-  optional string secretAccessKey = 2;
-
-  optional string endpoint = 3;
-}
-
-// Region defines the desired state of Region
-message Region {
-  optional string region = 1;
-
-  repeated string zones = 2;
-
-  optional string location = 3;
-}
-
-message ScalewayCredential {
-  optional string organization = 1;
-
-  optional string token = 2;
-}
-
-message SwiftCredential {
-  optional string username = 1;
-
-  optional string password = 2;
-
-  optional string tenantName = 3;
-
-  optional string tenantAuthURL = 4;
-
-  optional string domain = 5;
-
-  optional string region = 6;
-
-  optional string tenantID = 7;
-
-  optional string tenantDomain = 8;
-}
-
-message VultrCredential {
-  optional string token = 1;
-}
-
diff --git a/apis/cluster/register.go b/apis/cluster/register.go
index c3cb5baf..04807ec5 100644
--- a/apis/cluster/register.go
+++ b/apis/cluster/register.go
@@ -17,4 +17,4 @@ limitations under the License.
 package cluster
 
 // GroupName is the group name use in this package
-const GroupName = "cluster.bytebuilders.dev"
+const GroupName = "cluster.appscode.com"
diff --git a/apis/cluster/v1alpha1/doc.go b/apis/cluster/v1alpha1/doc.go
index 289df9cb..70a91eea 100644
--- a/apis/cluster/v1alpha1/doc.go
+++ b/apis/cluster/v1alpha1/doc.go
@@ -20,5 +20,5 @@ limitations under the License.
 // +k8s:openapi-gen=true
 // +k8s:defaulter-gen=TypeMeta
 
-// +groupName=cluster.bytebuilders.dev
+// +groupName=cluster.appscode.com
 package v1alpha1
diff --git a/apis/cluster/v1alpha1/generated.proto b/apis/cluster/v1alpha1/generated.proto
deleted file mode 100644
index 93434ab1..00000000
--- a/apis/cluster/v1alpha1/generated.proto
+++ /dev/null
@@ -1,343 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-
-// This file was autogenerated by go-to-protobuf. Do not edit it manually!
-
-syntax = "proto2";
-
-package go.bytebuilders.dev.resource_model.apis.cluster.v1alpha1;
-
-import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
-
-// Package-wide variables from generator "generated".
-option go_package = "go.bytebuilders.dev/resource-model/apis/cluster/v1alpha1";
-
-message AWSProvider {
-  // +optional
-  optional string region = 1;
-
-  // +optional
-  optional string clusterID = 2;
-
-  // +optional
-  optional string assumeRoleArn = 3;
-
-  // +optional
-  optional string assumeRoleExternalID = 4;
-
-  // +optional
-  optional string sessionName = 5;
-
-  // +optional
-  optional bool forwardSessionName = 6;
-
-  // +optional
-  optional bool cache = 7;
-
-  // Temporary Token for 15 mins only, if expired or not set create a new one
-  // +optional
-  optional string token = 8;
-
-  // +optional
-  optional int64 expiration = 9;
-
-  optional string accessKeyID = 10;
-
-  optional string secretAccessKey = 11;
-}
-
-// AuthProviderConfig holds the configuration for a specified auth provider.
-message AuthProviderConfig {
-  optional string name = 1;
-
-  // +optional
-  map<string, string> config = 2;
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=clusterauthinfotemplates,singular=clusterauthinfotemplate,shortName=cauth,scope=Cluster,categories={kubernetes,resource-model,appscode}
-// +kubebuilder:subresource:status
-message ClusterAuthInfoTemplate {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  optional ClusterAuthInfoTemplateSpec spec = 2;
-
-  optional ClusterAuthInfoTemplateStatus status = 3;
-}
-
-message ClusterAuthInfoTemplateList {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  repeated ClusterAuthInfoTemplate items = 2;
-}
-
-message ClusterAuthInfoTemplateSpec {
-  optional string uid = 1;
-
-  optional int64 ownerID = 2;
-
-  // CertificateAuthorityData contains PEM-encoded certificate authority certificates.
-  // +optional
-  optional bytes certificateAuthorityData = 3;
-
-  // ClientCertificateData contains PEM-encoded data from a client cert file for TLS.
-  // +optional
-  optional bytes clientCertificateData = 4;
-
-  // ClientKeyData contains PEM-encoded data from a client key file for TLS.
-  // +optional
-  optional bytes clientKeyData = 5;
-
-  // Token is the bearer token for authentication to the kubernetes cluster.
-  // +optional
-  optional string token = 6;
-
-  // Username is the username for basic authentication to the kubernetes cluster.
-  // +optional
-  optional string username = 7;
-
-  // Password is the password for basic authentication to the kubernetes cluster.
-  // +optional
-  optional string password = 8;
-
-  // Impersonate is the username to act-as.
-  // +optional
-  optional string impersonate = 9;
-
-  // ImpersonateGroups is the groups to impersonate.
-  // +optional
-  repeated string impersonateGroups = 10;
-
-  // ImpersonateUserExtra contains additional information for impersonated user.
-  // +optional
-  map<string, ExtraValue> impersonateUserExtra = 11;
-
-  // AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
-  // +optional
-  optional AuthProviderConfig authProvider = 13;
-}
-
-message ClusterAuthInfoTemplateStatus {
-  // ObservedGeneration is the most recent generation observed for this resource. It corresponds to the
-  // resource's generation, which is updated on mutation by the API Server.
-  // +optional
-  optional int64 observedGeneration = 1;
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=clusterinfos,singular=clusterinfo,shortName=cinfo,scope=Cluster,categories={kubernetes,resource-model,appscode}
-// +kubebuilder:subresource:status
-message ClusterInfo {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  optional ClusterInfoSpec spec = 2;
-
-  optional ClusterInfoStatus status = 3;
-}
-
-message ClusterInfoList {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  repeated ClusterInfo items = 2;
-}
-
-message ClusterInfoSpec {
-  optional string displayName = 1;
-
-  optional string name = 2;
-
-  optional string uid = 3;
-
-  optional int64 ownerID = 4;
-
-  // +optional
-  optional string externalID = 5;
-
-  // +optional
-  optional string ownerName = 6;
-
-  // +optional
-  optional string provider = 7;
-
-  // +optional
-  optional string endpoint = 8;
-
-  // +optional
-  optional string location = 9;
-
-  // +optional
-  optional string project = 10;
-
-  // +optional
-  optional string kubernetesVersion = 11;
-
-  // +optional
-  optional int32 nodeCount = 12;
-
-  // +optional
-  optional int64 createdAt = 13;
-
-  // +optional
-  optional string age = 14;
-}
-
-message ClusterInfoStatus {
-  // ObservedGeneration is the most recent generation observed for this resource. It corresponds to the
-  // resource's generation, which is updated on mutation by the API Server.
-  // +optional
-  optional int64 observedGeneration = 1;
-
-  // Phase represents current status of the cluster
-  // +optional
-  optional string phase = 2;
-
-  // Reason explains the reason behind the cluster current phase
-  // +optional
-  optional string reason = 3;
-
-  // Message specifies additional information regarding the possible actions for the user
-  // +optional
-  optional string message = 4;
-}
-
-message ClusterOptions {
-  optional string resourceName = 1;
-
-  optional string provider = 2;
-
-  optional int64 userID = 3;
-
-  optional string cID = 4;
-
-  optional int64 ownerID = 5;
-
-  optional string importType = 6;
-
-  optional string externalID = 7;
-
-  optional string connectorLinkID = 8;
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=clusteruserauths,singular=clusteruserauth,shortName=uauth,scope=Cluster,categories={kubernetes,resource-model,appscode}
-// +kubebuilder:subresource:status
-message ClusterUserAuth {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  optional ClusterUserAuthSpec spec = 2;
-
-  optional ClusterUserAuthStatus status = 3;
-}
-
-message ClusterUserAuthList {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  repeated ClusterUserAuth items = 2;
-}
-
-message ClusterUserAuthSpec {
-  optional string clusterUID = 1;
-
-  optional int64 userID = 2;
-
-  // ClientCertificateData contains PEM-encoded data from a client cert file for TLS.
-  // +optional
-  optional bytes clientCertificateData = 3;
-
-  // ClientKeyData contains PEM-encoded data from a client key file for TLS.
-  // +optional
-  optional bytes clientKeyData = 4;
-
-  // Token is the bearer token for authentication to the kubernetes cluster.
-  // +optional
-  optional string token = 5;
-
-  // Username is the username for basic authentication to the kubernetes cluster.
-  // +optional
-  optional string username = 6;
-
-  // Password is the password for basic authentication to the kubernetes cluster.
-  // +optional
-  optional string password = 7;
-
-  // Impersonate is the username to act-as.
-  // +optional
-  optional string impersonate = 8;
-
-  // ImpersonateGroups is the groups to impersonate.
-  // +optional
-  repeated string impersonateGroups = 9;
-
-  // ImpersonateUserExtra contains additional information for impersonated user.
-  // +optional
-  map<string, ExtraValue> impersonateUserExtra = 10;
-
-  // AuthProvider specifies a custom authentication plugin for the kubernetes cluster.
-  // +optional
-  optional AuthProviderConfig authProvider = 11;
-
-  // Provider Access Token params
-  // +optional
-  optional string provider = 12;
-
-  optional GoogleOAuthProvider googleOAuth = 13;
-
-  optional GoogleCloudCredential googleCloud = 14;
-
-  optional AWSProvider aws = 15;
-
-  optional string credentialName = 16;
-}
-
-message ClusterUserAuthStatus {
-  // ObservedGeneration is the most recent generation observed for this resource. It corresponds to the
-  // resource's generation, which is updated on mutation by the API Server.
-  // +optional
-  optional int64 observedGeneration = 1;
-}
-
-// ExtraValue masks the value so protobuf can generate
-// +protobuf.nullable=true
-// +protobuf.options.(gogoproto.goproto_stringer)=false
-message ExtraValue {
-  // items, if empty, will result in an empty slice
-
-  repeated string items = 1;
-}
-
-message GoogleCloudCredential {
-  optional string projectID = 1;
-
-  optional string serviceAccount = 2;
-}
-
-message GoogleOAuthProvider {
-  optional string clientID = 1;
-
-  optional string clientSecret = 2;
-
-  optional string accessToken = 3;
-
-  // +optional
-  optional string refreshToken = 4;
-
-  // +optional
-  optional int64 expiry = 5;
-}
-
diff --git a/apis/identity/fuzzer/fuzzer.go b/apis/identity/fuzzer/fuzzer.go
deleted file mode 100644
index bbb94e71..00000000
--- a/apis/identity/fuzzer/fuzzer.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
-Copyright The KubeVault Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package fuzzer
-
-import (
-  "go.bytebuilders.dev/resource-model/apis/identity/v1alpha1"
-
-  fuzz "github.com/google/gofuzz"
-  runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
-)
-
-// Funcs returns the fuzzer functions for this api group.
-var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
-  return []interface{}{
-    // v1alpha1
-    func(s *v1alpha1.Team, c fuzz.Continue) {
-      c.FuzzNoCustom(s) // fuzz self without calling this function again
-    },
-  }
-}
diff --git a/apis/identity/install/install.go b/apis/identity/install/install.go
deleted file mode 100644
index 09f52260..00000000
--- a/apis/identity/install/install.go
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package install
-
-import (
-  "go.bytebuilders.dev/resource-model/apis/identity/v1alpha1"
-
-  "k8s.io/apimachinery/pkg/runtime"
-  utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-)
-
-// Install registers the API group and adds types to a scheme
-func Install(scheme *runtime.Scheme) {
-  utilruntime.Must(v1alpha1.AddToScheme(scheme))
-  utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion))
-}
diff --git a/apis/identity/install/roundtrip_test.go b/apis/identity/install/roundtrip_test.go
deleted file mode 100644
index 5fdf5d38..00000000
--- a/apis/identity/install/roundtrip_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package install
-
-import (
-  "testing"
-
-  "k8s.io/apimachinery/pkg/api/apitesting/roundtrip"
-)
-
-func TestRoundTripTypes(t *testing.T) {
-  roundtrip.RoundTripTestForAPIGroup(t, Install, nil)
-}
diff --git a/apis/identity/register.go b/apis/identity/register.go
deleted file mode 100644
index 0c131833..00000000
--- a/apis/identity/register.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package identity
-
-// GroupName is the group name use in this package
-const GroupName = "identity.bytebuilders.dev"
diff --git a/apis/identity/v1alpha1/doc.go b/apis/identity/v1alpha1/doc.go
deleted file mode 100644
index bfafdbdc..00000000
--- a/apis/identity/v1alpha1/doc.go
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package v1alpha1 is the v1alpha1 version of the API.
-
-// +k8s:deepcopy-gen=package
-// +k8s:conversion-gen=go.bytebuilders.dev/resource-model/apis/identity
-// +k8s:openapi-gen=true
-// +k8s:defaulter-gen=TypeMeta
-
-// +groupName=identity.bytebuilders.dev
-package v1alpha1
diff --git a/apis/identity/v1alpha1/generated.proto b/apis/identity/v1alpha1/generated.proto
deleted file mode 100644
index 813db289..00000000
--- a/apis/identity/v1alpha1/generated.proto
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-
-// This file was autogenerated by go-to-protobuf. Do not edit it manually!
-
-syntax = "proto2";
-
-package go.bytebuilders.dev.resource_model.apis.identity.v1alpha1;
-
-import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
-
-// Package-wide variables from generator "generated".
-option go_package = "go.bytebuilders.dev/resource-model/apis/identity/v1alpha1";
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=orgusers,singular=orguser,scope=Cluster,categories={kubernetes,resource-model,appscode}
-// +kubebuilder:subresource:status
-message OrgUser {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  optional OrgUserSpec spec = 2;
-
-  optional OrgUserStatus status = 3;
-}
-
-message OrgUserList {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  repeated OrgUser items = 2;
-}
-
-message OrgUserSpec {
-  optional int64 userID = 1;
-
-  // +optional
-  optional string userName = 2;
-
-  // +optional
-  optional string fullName = 3;
-
-  // +optional
-  optional string email = 4;
-
-  // +optional
-  optional string avatarURL = 5;
-
-  optional int64 orgID = 6;
-
-  // +optional
-  optional string orgName = 7;
-
-  optional bool isPublic = 8;
-
-  // +optional
-  optional string teamName = 9;
-}
-
-message OrgUserStatus {
-  // ObservedGeneration is the most recent generation observed for this resource. It corresponds to the
-  // resource's generation, which is updated on mutation by the API Server.
-  // +optional
-  optional int64 observedGeneration = 1;
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=teams,singular=team,scope=Cluster,categories={kubernetes,resource-model,appscode}
-// +kubebuilder:subresource:status
-message Team {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  optional TeamSpec spec = 2;
-
-  optional TeamStatus status = 3;
-}
-
-message TeamList {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  repeated Team items = 2;
-}
-
-message TeamSpec {
-  optional string name = 1;
-
-  optional string lowerName = 2;
-
-  optional string uid = 3;
-
-  repeated string tags = 4;
-
-  optional int64 ownerID = 5;
-
-  // +optional
-  optional string orgName = 6;
-
-  optional string description = 7;
-
-  optional string authorize = 8;
-
-  optional int64 numMembers = 9;
-
-  // +optional
-  repeated TeamUser members = 10;
-}
-
-message TeamStatus {
-  // ObservedGeneration is the most recent generation observed for this resource. It corresponds to the
-  // resource's generation, which is updated on mutation by the API Server.
-  // +optional
-  optional int64 observedGeneration = 1;
-}
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=teamusers,singular=teamuser,scope=Cluster,categories={kubernetes,resource-model,appscode}
-// +kubebuilder:subresource:status
-message TeamUser {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  optional TeamUserSpec spec = 2;
-
-  optional TeamUserStatus status = 3;
-}
-
-message TeamUserList {
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  repeated TeamUser items = 2;
-}
-
-message TeamUserSpec {
-  optional int64 userID = 1;
-
-  // +optional
-  optional string userName = 2;
-
-  // +optional
-  optional string fullName = 3;
-
-  // +optional
-  optional string email = 4;
-
-  // +optional
-  optional string avatarURL = 5;
-
-  optional int64 orgID = 6;
-
-  // +optional
-  optional string orgName = 7;
-
-  optional int64 teamID = 8;
-
-  // +optional
-  optional int64 teamName = 9;
-
-  optional bool isPublic = 10;
-}
-
-message TeamUserStatus {
-  // ObservedGeneration is the most recent generation observed for this resource. It corresponds to the
-  // resource's generation, which is updated on mutation by the API Server.
-  // +optional
-  optional int64 observedGeneration = 1;
-}
-
diff --git a/apis/identity/v1alpha1/org_user_helpers.go b/apis/identity/v1alpha1/org_user_helpers.go
deleted file mode 100644
index 4e49d4c0..00000000
--- a/apis/identity/v1alpha1/org_user_helpers.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-  "go.bytebuilders.dev/resource-model/crds"
-
-  "kmodules.xyz/client-go/apiextensions"
-)
-
-func (_ OrgUser) CustomResourceDefinition() *apiextensions.CustomResourceDefinition {
-  return crds.MustCustomResourceDefinition(SchemeGroupVersion.WithResource(ResourceOrgUsers))
-}
diff --git a/apis/identity/v1alpha1/org_user_types.go b/apis/identity/v1alpha1/org_user_types.go
deleted file mode 100644
index 189ecfa0..00000000
--- a/apis/identity/v1alpha1/org_user_types.go
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
-  ResourceKindOrgUser = "OrgUser"
-  ResourceOrgUser = "orguser"
-  ResourceOrgUsers = "orgusers"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:openapi-gen=true
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=orgusers,singular=orguser,scope=Cluster,categories={kubernetes,resource-model,appscode}
-// +kubebuilder:subresource:status
-type OrgUser struct {
-  metav1.TypeMeta `json:",inline"`
-  metav1.ObjectMeta `json:"metadata,omitempty"`
-  Spec OrgUserSpec `json:"spec,omitempty"`
-  Status OrgUserStatus `json:"status,omitempty"`
-}
-
-type OrgUserSpec struct {
-  UserID int64 `json:"userID"`
-  // +optional
-  UserName string `json:"userName,omitempty"`
-  // +optional
-  FullName string `json:"fullName,omitempty"`
-  // +optional
-  Email string `json:"email,omitempty"`
-  // +optional
-  AvatarURL string `json:"avatarURL,omitempty"`
-  OrgID int64 `json:"orgID"`
-  // +optional
-  OrgName string `json:"orgName,omitempty"`
-  IsPublic bool `json:"isPublic"`
-  // +optional
-  TeamName string `json:"teamName"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:object:root=true
-
-type OrgUserList struct {
-  metav1.TypeMeta `json:",inline"`
-  metav1.ListMeta `json:"metadata,omitempty"`
-  Items []OrgUser `json:"items,omitempty"`
-}
-
-type OrgUserStatus struct {
-  // ObservedGeneration is the most recent generation observed for this resource. It corresponds to the
-  // resource's generation, which is updated on mutation by the API Server.
-  // +optional
-  ObservedGeneration int64 `json:"observedGeneration,omitempty"`
-}
diff --git a/apis/identity/v1alpha1/register.go b/apis/identity/v1alpha1/register.go
deleted file mode 100644
index ca3dd34e..00000000
--- a/apis/identity/v1alpha1/register.go
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-  "go.bytebuilders.dev/resource-model/apis/identity"
-
-  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-  "k8s.io/apimachinery/pkg/runtime"
-  "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: identity.GroupName, Version: "v1alpha1"}
-
-var (
-  // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
-  // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
-  SchemeBuilder runtime.SchemeBuilder
-  localSchemeBuilder = &SchemeBuilder
-  AddToScheme = localSchemeBuilder.AddToScheme
-)
-
-func init() {
-  // We only register manually written functions here. The registration of the
-  // generated functions takes place in the generated files. The separation
-  // makes the code compile even when the generated files are missing.
-  localSchemeBuilder.Register(addKnownTypes)
-}
-
-// Adds the list of known types to the given scheme.
-func addKnownTypes(scheme *runtime.Scheme) error {
-  scheme.AddKnownTypes(SchemeGroupVersion,
-    &Team{},
-    &TeamList{},
-
-    &TeamUser{},
-    &TeamUserList{},
-
-    &OrgUser{},
-    &OrgUserList{},
-
-    &TeamUser{},
-    &TeamUserList{},
-  )
-  metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
-  return nil
-}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
-  return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
diff --git a/apis/identity/v1alpha1/team_helpers.go b/apis/identity/v1alpha1/team_helpers.go
deleted file mode 100644
index f5dc5062..00000000
--- a/apis/identity/v1alpha1/team_helpers.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-  "go.bytebuilders.dev/resource-model/crds"
-
-  "kmodules.xyz/client-go/apiextensions"
-)
-
-func (_ Team) CustomResourceDefinition() *apiextensions.CustomResourceDefinition {
-  return crds.MustCustomResourceDefinition(SchemeGroupVersion.WithResource(ResourceTeams))
-}
diff --git a/apis/identity/v1alpha1/team_types.go b/apis/identity/v1alpha1/team_types.go
deleted file mode 100644
index d1c3bc78..00000000
--- a/apis/identity/v1alpha1/team_types.go
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
-  ResourceKindTeam = "Team"
-  ResourceTeam = "team"
-  ResourceTeams = "teams"
-)
-
-type AccessMode string
-
-const (
-  AccessModeNone AccessMode = "none"
-  AccessModeRead AccessMode = "read"
-  AccessModeWrite AccessMode = "write"
-  AccessModeAdmin AccessMode = "admin"
-  AccessModeOwner AccessMode = "owner"
-)
-
-// +kubebuilder:validation:Enum=Dev;Front-End;Back-End
-type TeamTag string
-
-const (
-  TeamTagDev TeamTag = "Dev"
-  TeamTagFrontEnd TeamTag = "Front-End"
-  TeamTagBackEnd TeamTag = "Back-End"
-)
-
-const (
-  OwnerTeamName = "Owners"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:openapi-gen=true
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=teams,singular=team,scope=Cluster,categories={kubernetes,resource-model,appscode}
-// +kubebuilder:subresource:status
-type Team struct {
-  metav1.TypeMeta `json:",inline"`
-  metav1.ObjectMeta `json:"metadata,omitempty"`
-  Spec TeamSpec `json:"spec,omitempty"`
-  Status TeamStatus `json:"status,omitempty"`
-}
-
-type TeamSpec struct {
-  Name string `json:"name"`
-  LowerName string `json:"lowerName"`
-  UID string `json:"uid"`
-  Tags []TeamTag `json:"tags"`
-  OrgID int64 `json:"ownerID"`
-  //+optional
-  OrgName string `json:"orgName,omitempty"`
-  Description string `json:"description,omitempty"`
-  Authorize AccessMode `json:"authorize"`
-  NumMembers int64 `json:"numMembers"`
-  //+optional
-  Members []TeamUser `json:"members,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:object:root=true
-
-type TeamList struct {
-  metav1.TypeMeta `json:",inline"`
-  metav1.ListMeta `json:"metadata,omitempty"`
-  Items []Team `json:"items,omitempty"`
-}
-
-type TeamStatus struct {
-  // ObservedGeneration is the most recent generation observed for this resource. It corresponds to the
-  // resource's generation, which is updated on mutation by the API Server.
-  // +optional
-  ObservedGeneration int64 `json:"observedGeneration,omitempty"`
-}
diff --git a/apis/identity/v1alpha1/team_user_helpers.go b/apis/identity/v1alpha1/team_user_helpers.go
deleted file mode 100644
index 4e4d5201..00000000
--- a/apis/identity/v1alpha1/team_user_helpers.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-  "go.bytebuilders.dev/resource-model/crds"
-
-  "kmodules.xyz/client-go/apiextensions"
-)
-
-func (_ TeamUser) CustomResourceDefinition() *apiextensions.CustomResourceDefinition {
-  return crds.MustCustomResourceDefinition(SchemeGroupVersion.WithResource(ResourceTeamUsers))
-}
diff --git a/apis/identity/v1alpha1/team_user_types.go b/apis/identity/v1alpha1/team_user_types.go
deleted file mode 100644
index adb993b5..00000000
--- a/apis/identity/v1alpha1/team_user_types.go
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-const (
-  ResourceKindTeamUser = "TeamUser"
-  ResourceTeamUser = "teamuser"
-  ResourceTeamUsers = "teamusers"
-)
-
-// +genclient
-// +genclient:nonNamespaced
-// +k8s:openapi-gen=true
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// +kubebuilder:object:root=true
-// +kubebuilder:resource:path=teamusers,singular=teamuser,scope=Cluster,categories={kubernetes,resource-model,appscode}
-// +kubebuilder:subresource:status
-type TeamUser struct {
-  metav1.TypeMeta `json:",inline"`
-  metav1.ObjectMeta `json:"metadata,omitempty"`
-  Spec TeamUserSpec `json:"spec,omitempty"`
-  Status TeamUserStatus `json:"status,omitempty"`
-}
-
-type TeamUserSpec struct {
-  UserID int64 `json:"userID"`
-  // +optional
-  UserName string `json:"userName,omitempty"`
-  // +optional
-  FullName string `json:"fullName,omitempty"`
-  // +optional
-  Email string `json:"email,omitempty"`
-  // +optional
-  AvatarURL string `json:"avatarURL,omitempty"`
-  OrgID int64 `json:"orgID"`
-  // +optional
-  OrgName string `json:"orgName,omitempty"`
-  TeamID int64 `json:"teamID"`
-  // +optional
-  TeamName int64 `json:"teamName"`
-  IsPublic bool `json:"isPublic"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:object:root=true
-
-type TeamUserList struct {
-  metav1.TypeMeta `json:",inline"`
-  metav1.ListMeta `json:"metadata,omitempty"`
-  Items []TeamUser `json:"items,omitempty"`
-}
-
-type TeamUserStatus struct {
-  // ObservedGeneration is the most recent generation observed for this resource. It corresponds to the
-  // resource's generation, which is updated on mutation by the API Server.
-  // +optional
-  ObservedGeneration int64 `json:"observedGeneration,omitempty"`
-}
diff --git a/apis/identity/v1alpha1/zz_generated.deepcopy.go b/apis/identity/v1alpha1/zz_generated.deepcopy.go
deleted file mode 100644
index dbae1dac..00000000
--- a/apis/identity/v1alpha1/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,317 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright 2020 AppsCode Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-  runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OrgUser) DeepCopyInto(out *OrgUser) {
-  *out = *in
-  out.TypeMeta = in.TypeMeta
-  in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-  out.Spec = in.Spec
-  out.Status = in.Status
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrgUser.
-func (in *OrgUser) DeepCopy() *OrgUser {
-  if in == nil {
-    return nil
-  }
-  out := new(OrgUser)
-  in.DeepCopyInto(out)
-  return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OrgUser) DeepCopyObject() runtime.Object {
-  if c := in.DeepCopy(); c != nil {
-    return c
-  }
-  return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OrgUserList) DeepCopyInto(out *OrgUserList) {
-  *out = *in
-  out.TypeMeta = in.TypeMeta
-  in.ListMeta.DeepCopyInto(&out.ListMeta)
-  if in.Items != nil {
-    in, out := &in.Items, &out.Items
-    *out = make([]OrgUser, len(*in))
-    for i := range *in {
-      (*in)[i].DeepCopyInto(&(*out)[i])
-    }
-  }
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrgUserList.
-func (in *OrgUserList) DeepCopy() *OrgUserList {
-  if in == nil {
-    return nil
-  }
-  out := new(OrgUserList)
-  in.DeepCopyInto(out)
-  return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *OrgUserList) DeepCopyObject() runtime.Object {
-  if c := in.DeepCopy(); c != nil {
-    return c
-  }
-  return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OrgUserSpec) DeepCopyInto(out *OrgUserSpec) {
-  *out = *in
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrgUserSpec.
-func (in *OrgUserSpec) DeepCopy() *OrgUserSpec {
-  if in == nil {
-    return nil
-  }
-  out := new(OrgUserSpec)
-  in.DeepCopyInto(out)
-  return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OrgUserStatus) DeepCopyInto(out *OrgUserStatus) {
-  *out = *in
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OrgUserStatus.
-func (in *OrgUserStatus) DeepCopy() *OrgUserStatus {
-  if in == nil {
-    return nil
-  }
-  out := new(OrgUserStatus)
-  in.DeepCopyInto(out)
-  return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Team) DeepCopyInto(out *Team) {
-  *out = *in
-  out.TypeMeta = in.TypeMeta
-  in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-  in.Spec.DeepCopyInto(&out.Spec)
-  out.Status = in.Status
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Team.
-func (in *Team) DeepCopy() *Team {
-  if in == nil {
-    return nil
-  }
-  out := new(Team)
-  in.DeepCopyInto(out)
-  return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *Team) DeepCopyObject() runtime.Object {
-  if c := in.DeepCopy(); c != nil {
-    return c
-  }
-  return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TeamList) DeepCopyInto(out *TeamList) {
-  *out = *in
-  out.TypeMeta = in.TypeMeta
-  in.ListMeta.DeepCopyInto(&out.ListMeta)
-  if in.Items != nil {
-    in, out := &in.Items, &out.Items
-    *out = make([]Team, len(*in))
-    for i := range *in {
-      (*in)[i].DeepCopyInto(&(*out)[i])
-    }
-  }
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeamList.
-func (in *TeamList) DeepCopy() *TeamList {
-  if in == nil {
-    return nil
-  }
-  out := new(TeamList)
-  in.DeepCopyInto(out)
-  return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *TeamList) DeepCopyObject() runtime.Object {
-  if c := in.DeepCopy(); c != nil {
-    return c
-  }
-  return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TeamSpec) DeepCopyInto(out *TeamSpec) {
-  *out = *in
-  if in.Tags != nil {
-    in, out := &in.Tags, &out.Tags
-    *out = make([]TeamTag, len(*in))
-    copy(*out, *in)
-  }
-  if in.Members != nil {
-    in, out := &in.Members, &out.Members
-    *out = make([]TeamUser, len(*in))
-    for i := range *in {
-      (*in)[i].DeepCopyInto(&(*out)[i])
-    }
-  }
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeamSpec.
-func (in *TeamSpec) DeepCopy() *TeamSpec {
-  if in == nil {
-    return nil
-  }
-  out := new(TeamSpec)
-  in.DeepCopyInto(out)
-  return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TeamStatus) DeepCopyInto(out *TeamStatus) {
-  *out = *in
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeamStatus.
-func (in *TeamStatus) DeepCopy() *TeamStatus {
-  if in == nil {
-    return nil
-  }
-  out := new(TeamStatus)
-  in.DeepCopyInto(out)
-  return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TeamUser) DeepCopyInto(out *TeamUser) {
-  *out = *in
-  out.TypeMeta = in.TypeMeta
-  in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-  out.Spec = in.Spec
-  out.Status = in.Status
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeamUser.
-func (in *TeamUser) DeepCopy() *TeamUser {
-  if in == nil {
-    return nil
-  }
-  out := new(TeamUser)
-  in.DeepCopyInto(out)
-  return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *TeamUser) DeepCopyObject() runtime.Object {
-  if c := in.DeepCopy(); c != nil {
-    return c
-  }
-  return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TeamUserList) DeepCopyInto(out *TeamUserList) {
-  *out = *in
-  out.TypeMeta = in.TypeMeta
-  in.ListMeta.DeepCopyInto(&out.ListMeta)
-  if in.Items != nil {
-    in, out := &in.Items, &out.Items
-    *out = make([]TeamUser, len(*in))
-    for i := range *in {
-      (*in)[i].DeepCopyInto(&(*out)[i])
-    }
-  }
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeamUserList.
-func (in *TeamUserList) DeepCopy() *TeamUserList {
-  if in == nil {
-    return nil
-  }
-  out := new(TeamUserList)
-  in.DeepCopyInto(out)
-  return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *TeamUserList) DeepCopyObject() runtime.Object {
-  if c := in.DeepCopy(); c != nil {
-    return c
-  }
-  return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TeamUserSpec) DeepCopyInto(out *TeamUserSpec) {
-  *out = *in
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeamUserSpec.
-func (in *TeamUserSpec) DeepCopy() *TeamUserSpec {
-  if in == nil {
-    return nil
-  }
-  out := new(TeamUserSpec)
-  in.DeepCopyInto(out)
-  return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TeamUserStatus) DeepCopyInto(out *TeamUserStatus) {
-  *out = *in
-  return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TeamUserStatus.
-func (in *TeamUserStatus) DeepCopy() *TeamUserStatus {
-  if in == nil {
-    return nil
-  }
-  out := new(TeamUserStatus)
-  in.DeepCopyInto(out)
-  return out
-}
diff --git a/crds/cloud.bytebuilders.dev_cloudproviders.yaml b/crds/cloud.appscode.com_cloudproviders.yaml
similarity index 98%
rename from crds/cloud.bytebuilders.dev_cloudproviders.yaml
rename to crds/cloud.appscode.com_cloudproviders.yaml
index 019150ad..f97bf874 100644
--- a/crds/cloud.bytebuilders.dev_cloudproviders.yaml
+++ b/crds/cloud.appscode.com_cloudproviders.yaml
@@ -4,9 +4,9 @@ metadata:
   creationTimestamp: null
   labels:
     app.kubernetes.io/name: bytebuilders
-  name: cloudproviders.cloud.bytebuilders.dev
+  name: cloudproviders.cloud.appscode.com
 spec:
-  group: cloud.bytebuilders.dev
+  group: cloud.appscode.com
   names:
     categories:
     - cloud
diff --git a/crds/cloud.bytebuilders.dev_credentials.yaml b/crds/cloud.appscode.com_credentials.yaml
similarity index 98%
rename from crds/cloud.bytebuilders.dev_credentials.yaml
rename to crds/cloud.appscode.com_credentials.yaml
index dc84a1ba..a08240b3 100644
--- a/crds/cloud.bytebuilders.dev_credentials.yaml
+++ b/crds/cloud.appscode.com_credentials.yaml
@@ -4,9 +4,9 @@ metadata:
   creationTimestamp: null
   labels:
     app.kubernetes.io/name: bytebuilders
-  name: credentials.cloud.bytebuilders.dev
+  name: credentials.cloud.appscode.com
 spec:
-  group: cloud.bytebuilders.dev
+  group: cloud.appscode.com
   names:
     categories:
    - kubernetes
diff --git a/crds/cloud.bytebuilders.dev_machinetypes.yaml b/crds/cloud.appscode.com_machinetypes.yaml
similarity index 97%
rename from crds/cloud.bytebuilders.dev_machinetypes.yaml
rename to crds/cloud.appscode.com_machinetypes.yaml
index 3a959fc2..65f989a8 100644
--- a/crds/cloud.bytebuilders.dev_machinetypes.yaml
+++ b/crds/cloud.appscode.com_machinetypes.yaml
@@ -4,9 +4,9 @@ metadata:
   creationTimestamp: null
   labels:
     app.kubernetes.io/name: bytebuilders
-  name: machinetypes.cloud.bytebuilders.dev
+  name: machinetypes.cloud.appscode.com
 spec:
-  group: cloud.bytebuilders.dev
+  group: cloud.appscode.com
   names:
     categories:
     - cloud
diff --git a/crds/cluster.bytebuilders.dev_clusterauthinfotemplates.yaml b/crds/cluster.appscode.com_clusterauthinfotemplates.yaml
similarity index 97%
rename from crds/cluster.bytebuilders.dev_clusterauthinfotemplates.yaml
rename to crds/cluster.appscode.com_clusterauthinfotemplates.yaml
index 271ecba7..1c1fdd7c 100644
--- a/crds/cluster.bytebuilders.dev_clusterauthinfotemplates.yaml
+++ b/crds/cluster.appscode.com_clusterauthinfotemplates.yaml
@@ -4,9 +4,9 @@ metadata:
   creationTimestamp: null
   labels:
     app.kubernetes.io/name: bytebuilders
-  name: clusterauthinfotemplates.cluster.bytebuilders.dev
+  name: clusterauthinfotemplates.cluster.appscode.com
 spec:
-  group: cluster.bytebuilders.dev
+  group: cluster.appscode.com
   names:
     categories:
     - kubernetes
diff --git a/crds/cluster.bytebuilders.dev_clusterinfos.yaml b/crds/cluster.appscode.com_clusterinfos.yaml
similarity index 98%
rename from crds/cluster.bytebuilders.dev_clusterinfos.yaml
rename to crds/cluster.appscode.com_clusterinfos.yaml
index bbc35ab6..a5662ad0 100644
--- a/crds/cluster.bytebuilders.dev_clusterinfos.yaml
+++ b/crds/cluster.appscode.com_clusterinfos.yaml
@@ -4,9 +4,9 @@ metadata:
   creationTimestamp: null
   labels:
     app.kubernetes.io/name: bytebuilders
-  name: clusterinfos.cluster.bytebuilders.dev
+  name: clusterinfos.cluster.appscode.com
 spec:
-  group: cluster.bytebuilders.dev
+  group: cluster.appscode.com
   names:
     categories:
     - kubernetes
diff --git a/crds/cluster.bytebuilders.dev_clusteruserauths.yaml b/crds/cluster.appscode.com_clusteruserauths.yaml
similarity index 98%
rename from crds/cluster.bytebuilders.dev_clusteruserauths.yaml
rename to crds/cluster.appscode.com_clusteruserauths.yaml
index a1779aeb..371c567f 100644
--- a/crds/cluster.bytebuilders.dev_clusteruserauths.yaml
+++ b/crds/cluster.appscode.com_clusteruserauths.yaml
@@ -4,9 +4,9 @@ metadata:
   creationTimestamp: null
   labels:
     app.kubernetes.io/name: bytebuilders
-  name: clusteruserauths.cluster.bytebuilders.dev
+  name: clusteruserauths.cluster.appscode.com
 spec:
-  group: cluster.bytebuilders.dev
+  group: cluster.appscode.com
   names:
     categories:
     - kubernetes
diff --git a/crds/identity.bytebuilders.dev_orgusers.yaml b/crds/identity.bytebuilders.dev_orgusers.yaml
deleted file mode 100644
index 188a5b36..00000000
--- a/crds/identity.bytebuilders.dev_orgusers.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  creationTimestamp: null
-  labels:
-    app.kubernetes.io/name: bytebuilders
-  name: orgusers.identity.bytebuilders.dev
-spec:
-  group: identity.bytebuilders.dev
-  names:
-    categories:
-    - kubernetes
-    - resource-model
-    - appscode
-    kind: OrgUser
-    listKind: OrgUserList
-    plural: orgusers
-    singular: orguser
-  scope: Cluster
-  versions:
-  - name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        properties:
-          apiVersion:
-            description: 'APIVersion defines the versioned schema of this representation
-              of an object. Servers should convert recognized schemas to the latest
-              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
-            type: string
-          kind:
-            description: 'Kind is a string value representing the REST resource this
-              object represents. Servers may infer this from the endpoint the client
-              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
-            type: string
-          metadata:
-            type: object
-          spec:
-            properties:
-              avatarURL:
-                type: string
-              email:
-                type: string
-              fullName:
-                type: string
-              isPublic:
-                type: boolean
-              orgID:
-                format: int64
-                type: integer
-              orgName:
-                type: string
-              teamName:
-                type: string
-              userID:
-                format: int64
-                type: integer
-              userName:
-                type: string
-            required:
-            - isPublic
-            - orgID
-            - userID
-            type: object
-          status:
-            properties:
-              observedGeneration:
-                description: ObservedGeneration is the most recent generation observed
-                  for this resource. It corresponds to the resource's generation,
-                  which is updated on mutation by the API Server.
-                format: int64
-                type: integer
-            type: object
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
diff --git a/crds/identity.bytebuilders.dev_teams.yaml b/crds/identity.bytebuilders.dev_teams.yaml
deleted file mode 100644
index 5ffc963d..00000000
--- a/crds/identity.bytebuilders.dev_teams.yaml
+++ /dev/null
@@ -1,147 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  creationTimestamp: null
-  labels:
-    app.kubernetes.io/name: bytebuilders
-  name: teams.identity.bytebuilders.dev
-spec:
-  group: identity.bytebuilders.dev
-  names:
-    categories:
-    - kubernetes
-    - resource-model
-    - appscode
-    kind: Team
-    listKind: TeamList
-    plural: teams
-    singular: team
-  scope: Cluster
-  versions:
-  - name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        properties:
-          apiVersion:
-            description: 'APIVersion defines the versioned schema of this representation
-              of an object. Servers should convert recognized schemas to the latest
-              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
-            type: string
-          kind:
-            description: 'Kind is a string value representing the REST resource this
-              object represents. Servers may infer this from the endpoint the client
-              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
-            type: string
-          metadata:
-            type: object
-          spec:
-            properties:
-              authorize:
-                type: string
-              description:
-                type: string
-              lowerName:
-                type: string
-              members:
-                items:
-                  properties:
-                    apiVersion:
-                      description: 'APIVersion defines the versioned schema of this
-                        representation of an object. Servers should convert recognized
-                        schemas to the latest internal value, and may reject unrecognized
-                        values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
-                      type: string
-                    kind:
-                      description: 'Kind is a string value representing the REST resource
-                        this object represents. Servers may infer this from the endpoint
-                        the client submits requests to. Cannot be updated. In CamelCase.
-                        More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
-                      type: string
-                    metadata:
-                      type: object
-                    spec:
-                      properties:
-                        avatarURL:
-                          type: string
-                        email:
-                          type: string
-                        fullName:
-                          type: string
-                        isPublic:
-                          type: boolean
-                        orgID:
-                          format: int64
-                          type: integer
-                        orgName:
-                          type: string
-                        teamID:
-                          format: int64
-                          type: integer
-                        teamName:
-                          format: int64
-                          type: integer
-                        userID:
-                          format: int64
-                          type: integer
-                        userName:
-                          type: string
-                      required:
-                      - isPublic
-                      - orgID
-                      - teamID
-                      - userID
-                      type: object
-                    status:
-                      properties:
-                        observedGeneration:
-                          description: ObservedGeneration is the most recent generation
-                            observed for this resource. It corresponds to the resource's
-                            generation, which is updated on mutation by the API Server.
-                          format: int64
-                          type: integer
-                      type: object
-                  type: object
-                type: array
-              name:
-                type: string
-              numMembers:
-                format: int64
-                type: integer
-              orgName:
-                type: string
-              ownerID:
-                format: int64
-                type: integer
-              tags:
-                items:
-                  enum:
-                  - Dev
-                  - Front-End
-                  - Back-End
-                  type: string
-                type: array
-              uid:
-                type: string
-            required:
-            - authorize
-            - lowerName
-            - name
-            - numMembers
-            - ownerID
-            - tags
-            - uid
-            type: object
-          status:
-            properties:
-              observedGeneration:
-                description: ObservedGeneration is the most recent generation observed
-                  for this resource. It corresponds to the resource's generation,
-                  which is updated on mutation by the API Server.
-                format: int64
-                type: integer
-            type: object
-        type: object
-    served: true
-    storage: true
-    subresources:
-      status: {}
diff --git a/crds/identity.bytebuilders.dev_teamusers.yaml b/crds/identity.bytebuilders.dev_teamusers.yaml
deleted file mode 100644
index c093a6fb..00000000
--- a/crds/identity.bytebuilders.dev_teamusers.yaml
+++ /dev/null
@@ -1,82 +0,0 @@
-apiVersion: apiextensions.k8s.io/v1
-kind: CustomResourceDefinition
-metadata:
-  creationTimestamp: null
-  labels:
-    app.kubernetes.io/name: bytebuilders
-  name: teamusers.identity.bytebuilders.dev
-spec:
-  group: identity.bytebuilders.dev
-  names:
-    categories:
-    - kubernetes
-    - resource-model
-    - appscode
-    kind: TeamUser
-    listKind: TeamUserList
-    plural: teamusers
-    singular: teamuser
-  scope: Cluster
-  versions:
-  - name: v1alpha1
-    schema:
-      openAPIV3Schema:
-        properties:
-          apiVersion:
-            description: 'APIVersion defines the versioned schema of this representation
-              of an object. Servers should convert recognized schemas to the latest
-              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
-            type: string
-          kind:
-            description: 'Kind is a string value representing the REST resource this
-              object represents. Servers may infer this from the endpoint the client
-              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
-            type: string
-          metadata:
-            type: object
-          spec:
-            properties:
-              avatarURL:
-                type: string
-              email:
-                type: string
-              fullName:
-                type: string
-              isPublic:
-                type: boolean
-              orgID:
-                format: int64
-                type: integer
-              orgName:
-                type: string
-              teamID:
-                format: int64
-                type: integer
-              teamName:
-                format: int64
-                type: integer
-              userID:
-                format: int64
-                type: integer
-              userName:
-                type: string
-            required:
-            - isPublic
-            - orgID
-            - teamID
-            - userID
-            type: object
-          status:
-            properties:
-              observedGeneration:
-                description: ObservedGeneration is the most recent generation observed
-                  for this resource. It corresponds to the resource's generation,
-                  which is updated on mutation by the API Server.
- format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/crds/lib_test.go b/crds/lib_test.go index ab85517f..45dbf13f 100644 --- a/crds/lib_test.go +++ b/crds/lib_test.go @@ -35,7 +35,7 @@ func TestCustomResourceDefinition(t *testing.T) { name: "clusterinfos", args: args{ gvr: schema.GroupVersionResource{ - Group: "cluster.bytebuilders.dev", + Group: "cluster.appscode.com", Version: "v1alpha1", Resource: "clusterinfos", }, diff --git a/crds/ui.bytebuilders.dev_editoroptions.yaml b/crds/ui.bytebuilders.dev_editoroptions.yaml deleted file mode 100644 index 59544d9c..00000000 --- a/crds/ui.bytebuilders.dev_editoroptions.yaml +++ /dev/null @@ -1,82 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - labels: - app.kubernetes.io/name: bytebuilders - name: editoroptions.ui.bytebuilders.dev -spec: - group: ui.bytebuilders.dev - names: - categories: - - ui - - appscode - kind: EditorOption - listKind: EditorOptionList - plural: editoroptions - singular: editoroption - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - category: - description: Category defines the category of the application that - this editor is for (eg, database) - type: string - editor: - description: ChartRepoRef references to a single version of a Chart - properties: - name: - type: string - url: - type: string - version: - type: string - required: - - name - - url - - version - type: object - provider: - description: Provider defines the name of the provider for this application - (eg, kubedb.com) - type: string - type: - description: Type defines the application type (eg, mongods.kubedb.com) - type: string - required: - - category - - editor - - provider - - type - type: object - status: - properties: - observedGeneration: - description: ObservedGeneration is the most recent generation observed - for this resource. It corresponds to the resource's generation, - which is updated on mutation by the API Server. 
- format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/crds/ui.bytebuilders.dev_optionseditors.yaml b/crds/ui.bytebuilders.dev_optionseditors.yaml deleted file mode 100644 index cc6303dd..00000000 --- a/crds/ui.bytebuilders.dev_optionseditors.yaml +++ /dev/null @@ -1,107 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - creationTimestamp: null - labels: - app.kubernetes.io/name: bytebuilders - name: optionseditors.ui.bytebuilders.dev -spec: - group: ui.bytebuilders.dev - names: - categories: - - ui - - appscode - kind: OptionsEditor - listKind: OptionsEditorList - plural: optionseditors - singular: optionseditor - scope: Cluster - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - properties: - category: - description: Category defines the category of the application that - this editor is for (eg, database) - type: string - chartRef: - description: ChartRef references a Helm chart with the ui.json - properties: - name: - type: string - url: - type: string - version: - type: string - required: - - name - - url - - version - type: object - provider: - description: Provider defines the name of the provider for this application - (eg, kubedb.com) - type: string - resource: - description: Resource identifies the resource to which this editor - applies - properties: - group: - type: string - kind: - description: Kind is the serialized kind of the resource. It - is normally CamelCase and singular. - type: string - name: - description: 'Name is the plural name of the resource to serve. It - must match the name of the CustomResourceDefinition-registration - too: plural.group and it must be all lowercase.' - type: string - scope: - description: ResourceScope is an enum defining the different scopes - available to a custom resource - type: string - version: - type: string - required: - - group - - kind - - name - - scope - - version - type: object - required: - - category - - chartRef - - provider - - resource - type: object - status: - properties: - observedGeneration: - description: ObservedGeneration is the most recent generation observed - for this resource. It corresponds to the resource's generation, - which is updated on mutation by the API Server. 
- format: int64 - type: integer - type: object - type: object - served: true - storage: true - subresources: - status: {} diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/aws.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/aws.json index 26adaf72..f17f63b8 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/aws.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/aws.json @@ -1,6 +1,6 @@ { "kind": "CloudProvider", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws", "creationTimestamp": null diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/azure.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/azure.json index ce2b3852..d42a0eab 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/azure.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/azure.json @@ -1,6 +1,6 @@ { "kind": "CloudProvider", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure", "creationTimestamp": null diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/digitalocean.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/digitalocean.json index 20b87325..3b0fcea9 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/digitalocean.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/digitalocean.json @@ -1,6 +1,6 @@ { "kind": "CloudProvider", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean", "creationTimestamp": null diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/gce.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/gce.json index cb748edc..f3371563 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/gce.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/gce.json @@ -1,6 +1,6 @@ { "kind": "CloudProvider", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce", "creationTimestamp": null diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/linode.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/linode.json index 4e3a62bb..a4dfe954 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/linode.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/linode.json @@ -1,6 +1,6 @@ { "kind": "CloudProvider", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "linode", "creationTimestamp": null diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/packet.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/packet.json index 92b8c9e2..d15ef4c1 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/packet.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/packet.json @@ -1,6 +1,6 @@ { "kind": "CloudProvider", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "packet", "creationTimestamp": null diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/scaleway.json 
b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/scaleway.json index ecfdc044..c64c7ac0 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/scaleway.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/scaleway.json @@ -1,6 +1,6 @@ { "kind": "CloudProvider", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "scaleway", "creationTimestamp": null diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/vultr.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/vultr.json index d269297a..76d3a2c1 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/vultr.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/vultr.json @@ -1,6 +1,6 @@ { "kind": "CloudProvider", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "vultr", "creationTimestamp": null diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.2xlarge.json index 2b447895..dbd3f026 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-a1.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.4xlarge.json index 0a6eac85..cb4a9b17 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-a1.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.large.json index 39531a8f..415da449 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.large.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.large.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-a1.large", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.medium.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.medium.json index b24f8862..69a9c162 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.medium.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.medium.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-a1.medium", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.xlarge.json index 7ec502a9..5df704fe 100755 
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-a1.xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.medium.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.medium.json index 313ec008..7b756e06 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.medium.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.medium.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c1.medium", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.xlarge.json index df1310dd..f893e5bd 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c1.xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.2xlarge.json index 596e1ad4..25f671e0 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c3.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.4xlarge.json index 5c46bd07..45aa79c1 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c3.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.8xlarge.json index ff1938af..fd8a988a 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.8xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.8xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c3.8xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.large.json index c0cac9c7..8b2c9992 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.large.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.large.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c3.large", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.xlarge.json index e54d8bdd..49e63685 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c3.xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.2xlarge.json index 32e8e1d0..a1fa1772 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c4.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.4xlarge.json index 5a0ab3b6..fd195e64 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c4.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.8xlarge.json index 1a7f09be..4f878997 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.8xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.8xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c4.8xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.large.json index cdf3bd37..d9bc2e09 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.large.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.large.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c4.large", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.xlarge.json index 3d9b47c3..fddc4329 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c4.xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.18xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.18xlarge.json index a78c83d4..70f9d4e0 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.18xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.18xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5.18xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.2xlarge.json index e09ba6d8..256e4162 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.4xlarge.json index afeb4ecd..854b56f8 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.9xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.9xlarge.json index 558bc76f..3b326ad2 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.9xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.9xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5.9xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.large.json index d8473bbe..3956905c 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.large.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.large.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5.large", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.xlarge.json index 27d0865b..1a47a5e9 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5.xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.18xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.18xlarge.json index 327a42ea..2b106244 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.18xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.18xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5d.18xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.2xlarge.json index 2badcfa8..cca604a6 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5d.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.4xlarge.json index 5edffd5b..5945d1d7 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5d.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.9xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.9xlarge.json index 82c5efe5..91c48877 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.9xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.9xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5d.9xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.large.json index 90398837..594e8323 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.large.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.large.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5d.large", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.xlarge.json index 213ef0f9..27a42a5d 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5d.xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.18xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.18xlarge.json index ea48b1a9..c22e0d64 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.18xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.18xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5n.18xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.2xlarge.json index 8fc9f91d..d5dfdddb 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5n.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.4xlarge.json index 9984f968..d7661492 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5n.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.9xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.9xlarge.json index 05cae45d..99e7679b 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.9xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.9xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5n.9xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.large.json index 49453851..5e8617cf 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.large.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.large.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5n.large", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.xlarge.json index f1510bb7..26d3ae40 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-c5n.xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cc2.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cc2.8xlarge.json index 723be9f0..cc303d66 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cc2.8xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cc2.8xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-cc2.8xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cr1.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cr1.8xlarge.json index 2eac5d1a..131f459b 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cr1.8xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cr1.8xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-cr1.8xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.2xlarge.json index 9567a64f..3c625020 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-d2.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.4xlarge.json index c8685cb2..e3524c48 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-d2.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.8xlarge.json index b5d734f4..cb930099 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.8xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.8xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-d2.8xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.xlarge.json index 8e01293e..46590be5 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-d2.xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.16xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.16xlarge.json index 82b38040..304a9b9a 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.16xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.16xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-f1.16xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.2xlarge.json index c0b80e80..76bc2a2e 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-f1.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.4xlarge.json index 65c568e8..4f6fa459 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-f1.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.2xlarge.json index 13153746..6e28a2e5 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-g2.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.8xlarge.json index 7496378d..0879d139 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.8xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.8xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-g2.8xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.16xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.16xlarge.json index 734c3949..392aa865 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.16xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.16xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-g3.16xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.4xlarge.json index cad26954..007d9d6c 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-g3.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.8xlarge.json index be8b1523..d769b0b4 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.8xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.8xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-g3.8xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3s.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3s.xlarge.json index 1238726d..c0fe57ec 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3s.xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3s.xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-g3s.xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.16xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.16xlarge.json index 7e9f6ea1..6c334ec2 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.16xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.16xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-h1.16xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.2xlarge.json index 422515ed..9d98f3d7 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-h1.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.4xlarge.json index edbde05e..95b5abb8 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-h1.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.8xlarge.json index b6b272ef..94f49344 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.8xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.8xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-h1.8xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-hs1.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-hs1.8xlarge.json index 8b620a47..361e0fff 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-hs1.8xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-hs1.8xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-hs1.8xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.2xlarge.json index 9fc8725c..cb7569af 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-i2.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.4xlarge.json index 5284b45d..e9b2e2e2 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-i2.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.8xlarge.json index af5e744c..be5ba7b8 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.8xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.8xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-i2.8xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.xlarge.json index 8d8bd4d1..f5fdf836 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-i2.xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.16xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.16xlarge.json index df1a8744..6e45c3fa 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.16xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.16xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-i3.16xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.2xlarge.json index ff0f2aa3..b24caaa0 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.2xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.2xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-i3.2xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.4xlarge.json index e8f4ff6a..b5ddc6c6 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.4xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.4xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-i3.4xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.8xlarge.json index 952324d9..5e9d645a 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.8xlarge.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.8xlarge.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-i3.8xlarge", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.large.json index ebfc25ad..ce376aab 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.large.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.large.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "aws-i3.large", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.metal.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.metal.json index 5a089e46..629c313c 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.metal.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.metal.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-i3.metal",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.xlarge.json
index f88c80de..ddd4607c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-i3.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.large.json
index 71348778..5e18d57c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m1.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.medium.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.medium.json
index 923352be..ef0f89c1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.medium.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.medium.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m1.medium",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.small.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.small.json
index 9c43900c..1f4c15e1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.small.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.small.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m1.small",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.xlarge.json
index a888cb44..bbff8ee6 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m1.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.2xlarge.json
index c74a7fe0..d031055d 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m2.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.4xlarge.json
index 88ff7c81..e01d1542 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.4xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.4xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m2.4xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.xlarge.json
index e908a399..cab9c734 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m2.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.2xlarge.json
index c23e3397..bfb5eb2a 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m3.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.large.json
index 9bd3b1e9..ea39ce39 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m3.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.medium.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.medium.json
index df511ab8..1228a1d6 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.medium.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.medium.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m3.medium",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.xlarge.json
index ca55ec31..f15d7b1c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m3.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.10xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.10xlarge.json
index 110199c6..b547e5a0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.10xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.10xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m4.10xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.16xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.16xlarge.json
index 4b913aa0..f35bb5d1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.16xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.16xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m4.16xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.2xlarge.json
index 07f65c6f..cbb6f5ba 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m4.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.4xlarge.json
index 0488e915..6120dc2c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.4xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.4xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m4.4xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.large.json
index 5f593e2b..fabf745c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m4.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.xlarge.json
index 3837b8d5..f5ea1908 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m4.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.12xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.12xlarge.json
index c31ef349..a382d257 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.12xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.12xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5.12xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.24xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.24xlarge.json
index 1de6b8e7..1878d51b 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.24xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.24xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5.24xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.2xlarge.json
index be410504..574e3669 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.4xlarge.json
index eaad0e2e..3582b526 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.4xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.4xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5.4xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.large.json
index 77b7b48c..28b35e1e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.xlarge.json
index 161094d1..2f2331f1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.12xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.12xlarge.json
index 89c3500d..9c137b9f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.12xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.12xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5a.12xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.24xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.24xlarge.json
index 75cd1c6c..7cd24b93 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.24xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.24xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5a.24xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.2xlarge.json
index f2321608..666d6004 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5a.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.4xlarge.json
index b3e70c59..2754c007 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.4xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.4xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5a.4xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.large.json
index 31349c1f..f2cf7aa3 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5a.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.xlarge.json
index 6bdba779..1e274a77 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5a.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.12xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.12xlarge.json
index 021426d0..bd35d40c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.12xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.12xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5d.12xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.24xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.24xlarge.json
index b65ad03a..fe1ade15 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.24xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.24xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5d.24xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.2xlarge.json
index f7dd46d4..fd62af7b 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5d.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.4xlarge.json
index 2c90713d..74506dbc 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.4xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.4xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5d.4xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.large.json
index 6d753108..3f893fce 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5d.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.xlarge.json
index c561a2a1..ace0286d 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-m5d.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.16xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.16xlarge.json
index 29a0b9a6..7dead0ce 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.16xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.16xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-p2.16xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.8xlarge.json
index 855a7588..0b8c32ed 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.8xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.8xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-p2.8xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.xlarge.json
index 431554f7..eb9b6a9b 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-p2.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.16xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.16xlarge.json
index 3ffb97fc..ed0e1b15 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.16xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.16xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-p3.16xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.2xlarge.json
index a204011d..e2a07f98 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-p3.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.8xlarge.json
index c6c1cfd1..c7766905 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.8xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.8xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-p3.8xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.2xlarge.json
index f5157d01..164418fd 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r3.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.4xlarge.json
index 025d5945..9b9188f6 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.4xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.4xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r3.4xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.8xlarge.json
index 637abb14..fc392ac2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.8xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.8xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r3.8xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.large.json
index 40efb422..9e8b304c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r3.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.xlarge.json
index 40a620a2..df44a530 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r3.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.16xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.16xlarge.json
index 5da49bd9..4e9737c6 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.16xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.16xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r4.16xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.2xlarge.json
index 55fbe952..11b42f3f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r4.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.4xlarge.json
index f4b84b26..7494342f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.4xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.4xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r4.4xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.8xlarge.json
index 9479b7df..b18f02b6 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.8xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.8xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r4.8xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.large.json
index b1ea21dc..f8c0e168 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r4.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.xlarge.json
index 3394c517..3dbc9e66 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r4.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.12xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.12xlarge.json
index bc0cef7e..c4184240 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.12xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.12xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5.12xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.24xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.24xlarge.json
index 06bd894e..51befa71 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.24xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.24xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5.24xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.2xlarge.json
index ab523407..058d43ed 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.4xlarge.json
index 804bfc2b..d3a74768 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.4xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.4xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5.4xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.large.json
index c9f97159..76827e41 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.xlarge.json
index 55b7d244..61ab9384 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.12xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.12xlarge.json
index 770b4158..64cb8320 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.12xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.12xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5a.12xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.24xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.24xlarge.json
index 4f31e492..a170f784 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.24xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.24xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5a.24xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.2xlarge.json
index d8661832..55b45ce8 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5a.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.4xlarge.json
index dfb7dac9..9fddf7fc 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.4xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.4xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5a.4xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.large.json
index 768b95da..fbbf9326 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5a.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.xlarge.json
index 914e2f22..ccb556ff 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5a.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.12xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.12xlarge.json
index 2f825a29..d23d143f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.12xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.12xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5d.12xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.24xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.24xlarge.json
index 014c219f..3f6dea4a 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.24xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.24xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5d.24xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.2xlarge.json
index 6d3d4130..f83abe80 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5d.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.4xlarge.json
index f48ecc67..6b7d0676 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.4xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.4xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5d.4xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.large.json
index ea30c6bc..16c32518 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5d.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.xlarge.json
index 9e543f1c..f0d0b51a 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-r5d.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t1.micro.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t1.micro.json
index de7b53be..f866cebc 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t1.micro.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t1.micro.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t1.micro",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.2xlarge.json
index 397a431a..ef28c97e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t2.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.large.json
index dc3a6deb..af9380c2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t2.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.medium.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.medium.json
index b54e8f68..03c23404 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.medium.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.medium.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t2.medium",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.micro.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.micro.json
index 9f9f94bd..fc77b657 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.micro.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.micro.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t2.micro",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.nano.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.nano.json
index 0d8a0f30..677cdd79 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.nano.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.nano.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t2.nano",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.small.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.small.json
index 70c18703..5f8f1d04 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.small.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.small.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t2.small",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.xlarge.json
index 42a31b13..faaf88bf 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t2.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.2xlarge.json
index c71bba53..0553312f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t3.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.large.json
index a40f2f2b..f8b1236b 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t3.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.medium.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.medium.json
index 3541d419..3a2b9f1e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.medium.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.medium.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t3.medium",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.micro.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.micro.json
index 8336acdb..004c8886 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.micro.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.micro.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t3.micro",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.nano.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.nano.json
index 7d3be21a..f072bec7 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.nano.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.nano.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t3.nano",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.small.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.small.json
index 91634c9b..095f4813 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.small.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.small.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t3.small",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.xlarge.json
index cbe38c11..f371d91b 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-t3.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-12tb1.metal.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-12tb1.metal.json
index 9a398146..a0b43403 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-12tb1.metal.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-12tb1.metal.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-u-12tb1.metal",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-6tb1.metal.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-6tb1.metal.json
index caeb2b32..c84006cd 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-6tb1.metal.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-6tb1.metal.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-u-6tb1.metal",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-9tb1.metal.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-9tb1.metal.json
index f86b4d10..88a073c1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-9tb1.metal.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-9tb1.metal.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-u-9tb1.metal",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.16xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.16xlarge.json
index 7d6f28ea..e4bc824f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.16xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.16xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-x1.16xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.32xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.32xlarge.json
index 91b5161d..d30219f4 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.32xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.32xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-x1.32xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.16xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.16xlarge.json
index ed64841e..45cea0a2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.16xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.16xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-x1e.16xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.2xlarge.json
index d89febf0..ff62a3f1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-x1e.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.32xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.32xlarge.json
index f00d580a..219495b3 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.32xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.32xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-x1e.32xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.4xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.4xlarge.json
index b020c490..1dfa66f7 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.4xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.4xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-x1e.4xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.8xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.8xlarge.json
index 6fd681c9..5a823bfe 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.8xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.8xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-x1e.8xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.xlarge.json
index b01a9834..24280a79 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-x1e.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.12xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.12xlarge.json
index 304c2985..255bd75b 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.12xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.12xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-z1d.12xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.2xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.2xlarge.json
index e14eeb03..9ba31bfd 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.2xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.2xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-z1d.2xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.3xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.3xlarge.json
index b258680c..03fda593 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.3xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.3xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-z1d.3xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.6xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.6xlarge.json
index 1ab2801b..2e80f026 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.6xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.6xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-z1d.6xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.large.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.large.json
index b9db29d8..2ed1b153 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.large.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.large.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-z1d.large",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.xlarge.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.xlarge.json
index d99aeed5..fd9f3b33 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.xlarge.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.xlarge.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "aws-z1d.xlarge",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a0.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a0.json
index 27b49350..8e112828 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a0.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a0.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-basic-a0",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a1.json
index bc1c7ad4..d3355158 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a1.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a1.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-basic-a1",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a2.json
index 1f7ea731..66609dc0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-basic-a2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a3.json
index 8e429406..77f20581 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-basic-a3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a4.json
index ba11ce61..c4236dee 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a4.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a4.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-basic-a4",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a0.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a0.json
index 60913fac..54b21538 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a0.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a0.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-a0",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1-v2.json
index c8ebde8c..182f0e8f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-a1-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1.json
index ee2b9c7d..9d6da587 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-a1",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a10.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a10.json
index 81a20a3f..22b69bfc 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a10.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a10.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-a10",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a11.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a11.json
index 089aaa59..71bd59ae 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a11.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a11.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-a11",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2-v2.json
index 0131dc00..4f5dd87c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-a2-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2.json
index ad5c8aa2..9e5b20b7 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-a2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2m-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2m-v2.json
index 65df2612..bdc74909 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2m-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2m-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-a2m-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a3.json
index 3433b136..e16a08e0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-a3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4-v2.json
index 587bcc8c..c715cfd4 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-a4-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4.json
index 89de4feb..8bc8fd8c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-a4",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4m-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4m-v2.json
index ad6e9331..8cd5423e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4m-v2.json
+++ 
b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4m-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-a4m-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a5.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a5.json index 27f16bb7..d71e5912 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a5.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a5.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-a5", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a6.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a6.json index 2910c798..e8ba9e98 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a6.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a6.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-a6", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a7.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a7.json index 5f71907f..45440592 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a7.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a7.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-a7", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8-v2.json index 2d9c3bb9..befb544d 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-a8-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8.json index 97fcd4fc..524141ad 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-a8", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8m-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8m-v2.json index 232a554f..b295d93a 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8m-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8m-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-a8m-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a9.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a9.json index 9fad6046..71f45f80 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a9.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a9.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-a9", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ls.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ls.json index d5eb2bf1..513f1a2e 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ls.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ls.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-b1ls", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ms.json index 36c95be9..4ced2193 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ms.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ms.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-b1ms", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1s.json index 3d4f6b60..8bb98d60 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1s.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1s.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-b1s", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2ms.json index 68569b4e..27e05574 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2ms.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2ms.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-b2ms", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2s.json 
b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2s.json index e999131f..e889c1ff 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2s.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2s.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-b2s", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b4ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b4ms.json index d7143791..4aef418f 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b4ms.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b4ms.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-b4ms", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b8ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b8ms.json index 192c3e70..5f1b9050 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b8ms.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b8ms.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-b8ms", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1-v2.json index 7520a579..6df8c1cd 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d1-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1.json index a8424d17..a78ad6b4 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d1", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2-promo.json index 9885f088..3c4b7c01 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": 
"azure-standard-d11-v2-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2.json index 863600d2..b3de8711 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d11-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11.json index 452e3391..cbf07821 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d11", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2-promo.json index 919e198d..204ce70a 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d12-v2-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2.json index 54c1ffd0..71ec56d9 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d12-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12.json index c7621954..db09d88c 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d12", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2-promo.json index e4059ec7..a995831a 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2-promo.json +++ 
b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d13-v2-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2.json index 588578e1..3c41937a 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d13-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13.json index 56f10d26..2b111318 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d13", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2-promo.json index 1ab052a5..6dee2d55 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d14-v2-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2.json index 857ab363..0dd1cb85 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d14-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14.json index 26688ebe..da5d92e7 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d14", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d15-v2.json 
b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d15-v2.json index e7df2cc9..5a847b7d 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d15-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d15-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d15-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16-v3.json index 65fe7860..d310ab4a 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d16-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16s-v3.json index 8639a960..8d17c032 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d16s-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2-promo.json index 986de23c..bcd50080 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d2-v2-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2.json index 0a55b063..f57a6362 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d2-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v3.json index 3fbab564..e91b1442 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": 
"cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d2-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2.json index 8e5831ac..e7f96640 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2s-v3.json index 0a85a963..acaf40f2 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d2s-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2-promo.json index ab6f04b5..048e8d56 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d3-v2-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2.json index 41043a83..426b74e0 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d3-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3.json index 39d54316..6b688a62 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32-v3.json index fcb392f3..04405ace 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32-v3.json +++ 
b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d32-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32s-v3.json index 76fbb96a..3340ceb1 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d32s-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2-promo.json index a6c6ec80..2546d613 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d4-v2-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2.json index 53c24965..839082dc 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d4-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v3.json index b907f9a9..0032cc95 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d4-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4.json index 613fe3f1..40eac451 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d4", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4s-v3.json 
b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4s-v3.json index 7052b476..66be5bd8 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d4s-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2-promo.json index 203df477..a73a98c6 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d5-v2-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2.json index be993019..8aaa4730 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d5-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64-v3.json index 68fa2442..cadde05b 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d64-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64s-v3.json index 4eca95ca..da4762d1 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d64s-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8-v3.json index 3816a822..0fb5b79e 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": 
"cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d8-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8s-v3.json index 0e7cf1a7..b69784c7 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-d8s-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc2s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc2s.json index a9cf2835..1323153c 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc2s.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc2s.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-dc2s", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc4s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc4s.json index 8a055bfa..7991e140 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc4s.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc4s.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-dc4s", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1-v2.json index c6b6d72f..dd012bfb 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds1-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1.json index 0d3e4996..a3d55f86 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds1", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-1-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-1-v2.json index debfa5ee..35824b24 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-1-v2.json +++ 
b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-1-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds11-1-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2-promo.json index 57c7073b..82a2f4c0 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds11-v2-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2.json index 85d6ca38..9abdb970 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds11-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11.json index f041ef3a..79756f2d 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds11", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-1-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-1-v2.json index 54324b89..a46108c5 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-1-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-1-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds12-1-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-2-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-2-v2.json index 3b94a822..6573bd85 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-2-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-2-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds12-2-v2", "creationTimestamp": null, diff --git 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2-promo.json index c810cfc8..d583a3fb 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds12-v2-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2.json index 65d8dc13..56798516 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds12-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12.json index c0bf37a1..257ac032 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds12", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-2-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-2-v2.json index 0c384b99..dbcab30b 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-2-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-2-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds13-2-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-4-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-4-v2.json index 7096b62e..6066040e 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-4-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-4-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds13-4-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2-promo.json index dd339012..59d60c65 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2-promo.json +++ 
b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds13-v2-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2.json index 5356e961..cd07b83d 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds13-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13.json index d00593de..e91fe444 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds13", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-4-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-4-v2.json index 972ea4c8..c0ade4f3 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-4-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-4-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds14-4-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-8-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-8-v2.json index b1100cfd..92d75662 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-8-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-8-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds14-8-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2-promo.json index 1a017ef6..d6858d3a 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-ds14-v2-promo", "creationTimestamp": null, diff --git 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2.json
index 0939b826..5a6de468 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds14-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14.json
index d0e5d92b..ba4dc55e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds14",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds15-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds15-v2.json
index 36397015..62eaa1ff 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds15-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds15-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds15-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2-promo.json
index 1f76073c..acb5fcd1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2-promo.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2-promo.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds2-v2-promo",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2.json
index 89466e1c..a663fbee 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds2-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2.json
index ece5d28f..95112eec 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2-promo.json
index 7b161cf9..ddf32895 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2-promo.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2-promo.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds3-v2-promo",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2.json
index d9568f5c..2e7ce3f0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds3-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3.json
index 72f5a8cd..1160df55 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2-promo.json
index 22adb899..d4b2c33c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2-promo.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2-promo.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds4-v2-promo",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2.json
index d74a8813..23dfcf89 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds4-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4.json
index 62285af3..620b2132 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds4",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2-promo.json
index aca03047..528f6837 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2-promo.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2-promo.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds5-v2-promo",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2.json
index 4aaf51db..86ab9731 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-ds5-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-4s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-4s-v3.json
index 0d4ad63b..a8cb1c2b 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-4s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-4s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e16-4s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-8s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-8s-v3.json
index 814126b4..1000e6f1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-8s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-8s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e16-8s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-v3.json
index a5cbdffd..940fe5b3 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e16-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16s-v3.json
index 0a3f6ee5..bcc81e8f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e16s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2-v3.json
index bea98150..c58d162e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e2-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20-v3.json
index b6093e5b..bc32a7d6 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e20-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20s-v3.json
index 2dd32cdf..f12bc69e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e20s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2s-v3.json
index 8bb99248..864b3f78 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e2s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-16s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-16s-v3.json
index 8ca314ea..e367e8d8 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-16s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-16s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e32-16s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-8s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-8s-v3.json
index 890ac909..268c4f0a 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-8s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-8s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e32-8s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-v3.json
index 518dbf0e..47c8103a 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e32-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32s-v3.json
index 53c1fed5..83eb6fa0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e32s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-2s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-2s-v3.json
index f0ee8112..a2230e2d 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-2s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-2s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e4-2s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-v3.json
index eccf6188..acd20864 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e4-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4s-v3.json
index 6d33e2c4..e956a307 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e4s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-16s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-16s-v3.json
index 84c229a4..578ed36c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-16s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-16s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e64-16s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-32s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-32s-v3.json
index f69bd6d7..70654b20 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-32s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-32s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e64-32s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-v3.json
index 68daa2e4..23cb2c9e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e64-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64i-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64i-v3.json
index 1945578d..5b1f2aee 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64i-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64i-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e64i-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64is-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64is-v3.json
index 01dd0fa6..4e61dda2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64is-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64is-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e64is-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64s-v3.json
index a20fbd2d..20c9e850 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e64s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-2s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-2s-v3.json
index 169d63d0..c2e1d5f6 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-2s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-2s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e8-2s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-4s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-4s-v3.json
index c88b44fa..94ca6e0c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-4s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-4s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e8-4s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-v3.json
index 30932984..fe3fe847 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e8-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8s-v3.json
index 5c25c634..35e8817e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8s-v3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8s-v3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-e8s-v3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1.json
index bc318e53..77d28ca5 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f1",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16.json
index 0a06a2de..85b033c7 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f16",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s-v2.json
index 068da56d..f6587f77 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f16s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s.json
index 02c70d91..16421cbc 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f16s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1s.json
index c37fdfce..f823f5a7 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f1s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2.json
index ab22e563..b50630fb 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s-v2.json
index bb2d9d5a..53ab5dab 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f2s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s.json
index f9688af1..a3a4ad50 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f2s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f32s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f32s-v2.json
index f277a481..da3dedf7 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f32s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f32s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f32s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4.json
index 8b64cb76..273c0c1e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f4",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s-v2.json
index 95c3c03e..15326f92 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f4s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s.json
index bf3db330..fcb1b428 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f4s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f64s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f64s-v2.json
index 018f72e0..b8366da0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f64s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f64s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f64s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f72s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f72s-v2.json
index 176486ba..7706c2fe 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f72s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f72s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f72s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8.json
index 9e71f4d3..6839f3d1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f8",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s-v2.json
index 8975b6b3..3f5fab6c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f8s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s.json
index 4fb6289b..a4739084 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-f8s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g1.json
index 60e010c2..f95345c3 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g1.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g1.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-g1",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g2.json
index ed0a138f..e0f6f8f6 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-g2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g3.json
index dee95aee..6f6ac400 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-g3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g4.json
index 59a20b40..2664f7a0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g4.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g4.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-g4",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g5.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g5.json
index 0b2c0cdb..1445d390 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g5.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g5.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-g5",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs1.json
index d7b0d923..01bbd435 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs1.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs1.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-gs1",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs2.json
index efe237d4..b79bea3c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-gs2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs3.json
index dc6de24b..9f8597a1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-gs3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-4.json
index 6842874a..965b0938 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-4.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-4.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-gs4-4",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-8.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-8.json
index aa863a70..52b1fecf 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-8.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-8.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-gs4-8",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4.json
index b7affbd6..2e9a140d 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-gs4",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-16.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-16.json
index c22c84d5..61da6b32 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-16.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-16.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-gs5-16",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-8.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-8.json
index 6eb7ec5a..e53f27d2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-8.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-8.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-gs5-8",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5.json
index cc127034..17263dc2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-gs5",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16-promo.json
index a4111d79..6861e7ed 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16-promo.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16-promo.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h16-promo",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16.json
index d1f21a6a..135f69f7 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h16",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m-promo.json
index dc55fcc5..bdc072c0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m-promo.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m-promo.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h16m-promo",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m.json
index 9c27d48c..ab842174 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h16m",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr-promo.json
index fcd09de0..ce9151a0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr-promo.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr-promo.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h16mr-promo",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr.json
index e0929390..6cb955e2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h16mr",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r-promo.json
index 11359cb8..6a383174 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r-promo.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r-promo.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h16r-promo",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r.json
index 52aeca73..57072b07 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h16r",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8-promo.json
index d6c4c1f8..80a3c7f0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8-promo.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8-promo.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h8-promo",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8.json
index cb3051dd..41f7139c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h8",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m-promo.json
index 342e3d35..67aca744 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m-promo.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m-promo.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h8m-promo",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m.json
index 7ca0dd1a..1816ea7c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-h8m",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hb60rs.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hb60rs.json
index 5cbff79b..2f008179 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hb60rs.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hb60rs.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-hb60rs",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hc44rs.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hc44rs.json
index 7884b8b4..a53c4055 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hc44rs.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hc44rs.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-hc44rs",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s-v2.json
index 844b6cfa..c71dde9a 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-l16s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s.json
index 7814d210..33fc756d 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-l16s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s-v2.json
index b1ad600b..ec88ba33 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-l32s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s.json
index 8fb4bece..79968e2b 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-l32s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l4s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l4s.json
index 81b16641..f7a75a3f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l4s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l4s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-l4s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l64s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l64s-v2.json
index 92127252..1657dc27 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l64s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l64s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-l64s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l80s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l80s-v2.json
index d1cf02f0..94cbecc5 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l80s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l80s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-l80s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s-v2.json
index aa87adee..ffa41c78 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-l8s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s.json
index b0023e64..ff83ba41 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-l8s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-32ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-32ms.json
index 31dcb8d1..4c33ea51 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-32ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-32ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m128-32ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-64ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-64ms.json
index 2b3818c6..a1a98293 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-64ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-64ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m128-64ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128.json
index 7683adef..22167d95 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m128",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128m.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128m.json
index 45d30f79..b081d75d 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128m.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128m.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m128m",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128ms.json
index 15d027b9..9d699fd4 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m128ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128s.json
index b84d3c18..271f7b33 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m128s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-4ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-4ms.json
index 9937b70e..573c6175 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-4ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-4ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m16-4ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-8ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-8ms.json
index b7484544..1b0e2f56 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-8ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-8ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m16-8ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16ms.json
index d0513810..a4d67224 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m16ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208ms-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208ms-v2.json
index e79cf083..3e0ba6bd 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208ms-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208ms-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m208ms-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208s-v2.json
index b762acca..cdfbf4e5 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208s-v2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208s-v2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m208s-v2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-16ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-16ms.json
index ebbb5f53..a2f36282 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-16ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-16ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m32-16ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-8ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-8ms.json
index 96c0f411..c82cb40a 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-8ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-8ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m32-8ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ls.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ls.json
index 3600d9a7..67197b73 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ls.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ls.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m32ls",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ms.json
index 012c9d4a..84b8c44b 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m32ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ts.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ts.json
index 66def277..fff0c631 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ts.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ts.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m32ts",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-16ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-16ms.json
index ba6594f3..a09c4c6b 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-16ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-16ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m64-16ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-32ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-32ms.json
index 41d195de..ef2bda92 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-32ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-32ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m64-32ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64.json
index f633bedd..72dc0d07 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m64",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ls.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ls.json
index 2c79f3ad..8f25d9a1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ls.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ls.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m64ls",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64m.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64m.json
index 99ed29f4..ed7c7933 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64m.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64m.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m64m",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ms.json
index 778c9434..4a5ae41f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m64ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64s.json
index a65bb0ff..914685ff 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m64s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-2ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-2ms.json
index 6216b0d7..414e5c80 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-2ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-2ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m8-2ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-4ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-4ms.json
index 4ee4f4f2..e85f4bf2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-4ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-4ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m8-4ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8ms.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8ms.json
index 0328b364..be7e4826 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8ms.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8ms.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "azure-standard-m8ms",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12-promo.json
index 8488ac36..8d559067 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12-promo.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12-promo.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion":
"cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc12-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12.json index 3948708e..d0cea13d 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc12", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v2.json index 57967e23..c3d1aa07 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc12s-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v3.json index aa4cf39d..afb2aa45 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc12s-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24-promo.json index a495e915..7ec4a327 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc24-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24.json index 926e8f32..3c57d101 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc24", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r-promo.json index 17ace04b..324b5740 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc24r-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r.json index 701d79cd..cedbd083 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc24r", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v2.json index 5f00953d..5425569c 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc24rs-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v3.json index 29c9e5a5..05d64479 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc24rs-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v2.json index 2e9e5c5a..c502aff7 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc24s-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v3.json index 732bfd1d..36d1c5b2 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc24s-v3", "creationTimestamp": null, 
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6-promo.json index 72869ceb..c83b9470 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc6-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6.json index 5ad5e51e..4e2c6a0e 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc6", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v2.json index 584fbcba..47b14ce9 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc6s-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v3.json index 713b53e5..e45c987e 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nc6s-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd12s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd12s.json index 25f2ed53..925dc24b 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd12s.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd12s.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nd12s", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24rs.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24rs.json index 6980b3b5..f362d202 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24rs.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24rs.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - 
"apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nd24rs", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24s.json index c5ee83f9..5f2e2721 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24s.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24s.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nd24s", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd6s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd6s.json index 7ecf2fe5..4cf7cc19 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd6s.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd6s.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nd6s", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12-promo.json index 61c7de8d..4242b829 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv12-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12.json index b5b700aa..7d71d8db 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv12", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v2.json index dea7cb8e..0557d455 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv12s-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v3.json index ea378435..59515bf6 100755 --- 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv12s-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24-promo.json index 4d121d1b..fb188df2 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv24-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24.json index 1c20c540..b6c21ce2 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv24", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v2.json index 87784af3..d210a12f 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv24s-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v3.json index 97aea087..d7f70cd9 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv24s-v3", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv48s-v3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv48s-v3.json index b88849a6..8426a215 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv48s-v3.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv48s-v3.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv48s-v3", "creationTimestamp": null, diff --git 
a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6-promo.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6-promo.json index 3420fe49..4300304f 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6-promo.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6-promo.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv6-promo", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6.json index 9bdcc6fc..f1005f10 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv6", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6s-v2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6s-v2.json index 28031fb7..ea43b72f 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6s-v2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6s-v2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-nv6s-v2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-pb6s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-pb6s.json index 2d66380b..8a6069f6 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-pb6s.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-pb6s.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "azure-standard-pb6s", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-16gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-16gb.json index 585b1e53..31476333 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-16gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-16gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-16gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-1gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-1gb.json index 9227b5fe..5d6b9a28 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-1gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-1gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + 
"apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-1gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-2gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-2gb.json index 77d0d061..696b881f 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-2gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-2gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-2gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-4gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-4gb.json index 0303f52c..5debc202 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-4gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-4gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-4gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-512mb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-512mb.json index c6c83680..fe2fbe37 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-512mb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-512mb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-512mb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-8gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-8gb.json index 257b1bc0..a162d213 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-8gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-8gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-8gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-2.json index 5322cb36..3fd210a7 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-c-2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-4.json index 872df273..62c9863f 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-4.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-4.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": 
"cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-c-4", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-2vcpu-8gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-2vcpu-8gb.json index decaaef8..073e0b5b 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-2vcpu-8gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-2vcpu-8gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-g-2vcpu-8gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-4vcpu-16gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-4vcpu-16gb.json index cbae30a9..e697e151 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-4vcpu-16gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-4vcpu-16gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-g-4vcpu-16gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-2vcpu-8gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-2vcpu-8gb.json index 244bb3bb..e4d52fee 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-2vcpu-8gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-2vcpu-8gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-gd-2vcpu-8gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-4vcpu-16gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-4vcpu-16gb.json index 1a2d9f30..c67d87c0 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-4vcpu-16gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-4vcpu-16gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-gd-4vcpu-16gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-1gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-1gb.json index da856ffb..16dc33a0 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-1gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-1gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-s-1vcpu-1gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-2gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-2gb.json index 
87487b61..4a68af5c 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-2gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-2gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-s-1vcpu-2gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-3gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-3gb.json index 76ff80c7..76342aeb 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-3gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-3gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-s-1vcpu-3gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-2gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-2gb.json index ea4c070c..82f2e121 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-2gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-2gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-s-2vcpu-2gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-4gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-4gb.json index 52f17ef5..d72ce40b 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-4gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-4gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-s-2vcpu-4gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-3vcpu-1gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-3vcpu-1gb.json index c56ccf98..b8e67a05 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-3vcpu-1gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-3vcpu-1gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-s-3vcpu-1gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-4vcpu-8gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-4vcpu-8gb.json index 03f3ea6c..c6ba6a2f 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-4vcpu-8gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-4vcpu-8gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": 
"digitalocean-s-4vcpu-8gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-6vcpu-16gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-6vcpu-16gb.json index af9f1e9f..2e73bdf0 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-6vcpu-16gb.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-6vcpu-16gb.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "digitalocean-s-6vcpu-16gb", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-f1-micro.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-f1-micro.json index 3a33beeb..0fc14e0a 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-f1-micro.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-f1-micro.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-f1-micro", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-g1-small.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-g1-small.json index 32727efa..18880313 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-g1-small.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-g1-small.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-g1-small", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-16.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-16.json index c214a113..17bad85c 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-16.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-16.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highcpu-16", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-2.json index 7234dcd3..dfaa4477 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highcpu-2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-32.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-32.json index 2e9f1e5d..7bbdc17a 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-32.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-32.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": 
"cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highcpu-32", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-4.json index 43af93aa..1a7f5459 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-4.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-4.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highcpu-4", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-64.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-64.json index c2551bfd..af8412a1 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-64.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-64.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highcpu-64", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-8.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-8.json index 1bc04189..187bbf63 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-8.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-8.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highcpu-8", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-96.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-96.json index 8eaf74d4..56dc6bea 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-96.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-96.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highcpu-96", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-16.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-16.json index d535be68..3b5d76c9 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-16.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-16.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highmem-16", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-2.json index 87fe4abe..178a0db5 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": 
"cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highmem-2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-32.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-32.json index ab37ed58..ae2d6e43 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-32.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-32.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highmem-32", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-4.json index 7716290b..96e096e1 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-4.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-4.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highmem-4", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-64.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-64.json index a2229530..d56a2918 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-64.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-64.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highmem-64", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-8.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-8.json index 21f48b78..e26934aa 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-8.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-8.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highmem-8", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-96.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-96.json index 0ca29059..a5bfb845 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-96.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-96.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-highmem-96", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-megamem-96.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-megamem-96.json index 58ecd1e3..78bc914e 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-megamem-96.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-megamem-96.json @@ -1,6 +1,6 @@ { 
"kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-megamem-96", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-1.json index 4de8eefb..022780d6 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-1.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-1.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-standard-1", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-16.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-16.json index 38e41858..7f7e8d22 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-16.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-16.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-standard-16", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-2.json index a043373e..1220f5f2 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-2.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-2.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-standard-2", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-32.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-32.json index 84b75194..ccec0fc1 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-32.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-32.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-standard-32", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-4.json index 6c39c5c8..81378f02 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-4.json +++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-4.json @@ -1,6 +1,6 @@ { "kind": "MachineType", - "apiVersion": "cloud.bytebuilders.dev/v1alpha1", + "apiVersion": "cloud.appscode.com/v1alpha1", "metadata": { "name": "gce-n1-standard-4", "creationTimestamp": null, diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-64.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-64.json index a9ee9868..dae185ae 100755 --- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-64.json +++ 
b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-64.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "gce-n1-standard-64",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-8.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-8.json
index b20f6487..529af61e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-8.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-8.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "gce-n1-standard-8",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-96.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-96.json
index 43e3a619..c3a2bc34 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-96.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-96.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "gce-n1-standard-96",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-160.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-160.json
index fd9ac391..dd50d5ce 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-160.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-160.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "gce-n1-ultramem-160",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-40.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-40.json
index 1c4423f7..af560ca2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-40.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-40.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "gce-n1-ultramem-40",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-80.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-80.json
index dbac0cb3..cba2c82f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-80.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-80.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "gce-n1-ultramem-80",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-16.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-16.json
index 27bf8323..efec49a9 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-16.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-16.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-dedicated-16",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-2.json
index 6177518e..2c63afcf 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-dedicated-2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-32.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-32.json
index ea48cac6..eaad17ac 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-32.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-32.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-dedicated-32",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-4.json
index 24fba15d..3ef6ed41 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-4.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-4.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-dedicated-4",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-48.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-48.json
index 339e4c06..68538c03 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-48.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-48.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-dedicated-48",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-8.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-8.json
index 8b6ff533..62ed5097 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-8.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-8.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-dedicated-8",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-1.json
index 0783985a..d06f4b2f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-1.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-1.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-highmem-1",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-16.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-16.json
index 521df642..5392b4c2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-16.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-16.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-highmem-16",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-2.json
index 956c1e55..700fc21a 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-highmem-2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-4.json
index 03832214..75732c4f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-4.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-4.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-highmem-4",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-8.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-8.json
index 9aa5e6f1..efbfeb20 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-8.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-8.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-highmem-8",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-nanode-1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-nanode-1.json
index c9b5ecd6..92c97626 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-nanode-1.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-nanode-1.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-nanode-1",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-1.json
index 2a279cc4..5a41bfc7 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-1.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-1.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-standard-1",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-16.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-16.json
index 28ce0473..de22679c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-16.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-16.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-standard-16",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-2.json
index 997d07a7..85d869b5 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-standard-2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-20.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-20.json
index d55fa49a..25fe71fc 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-20.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-20.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-standard-20",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-24.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-24.json
index 65772d7d..8b5a7061 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-24.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-24.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-standard-24",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-32.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-32.json
index 217fc324..f4a95f16 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-32.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-32.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-standard-32",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-4.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-4.json
index 6eb616f4..15e67ed5 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-4.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-4.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-standard-4",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-6.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-6.json
index ae0f183b..be66c252 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-6.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-6.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-standard-6",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-8.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-8.json
index 9db5a942..04f58f4e 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-8.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-8.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "linode-g6-standard-8",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-128gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-128gb.json
index a77d95cb..0a2319e3 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-128gb.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-128gb.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-arm64-128gb",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-16gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-16gb.json
index 4900812a..fdba88aa 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-16gb.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-16gb.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-arm64-16gb",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-2gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-2gb.json
index 30b1c123..549614bd 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-2gb.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-2gb.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-arm64-2gb",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-32gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-32gb.json
index e87e983a..7aa905f9 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-32gb.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-32gb.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-arm64-32gb",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-4gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-4gb.json
index a1030f3c..56ee63b3 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-4gb.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-4gb.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-arm64-4gb",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-64gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-64gb.json
index 613728d1..a963f3ab 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-64gb.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-64gb.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-arm64-64gb",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-8gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-8gb.json
index 8bcf4cde..69c21e13 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-8gb.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-8gb.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-arm64-8gb",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-0.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-0.json
index cb952c17..03eb1276 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-0.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-0.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-baremetal-0",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1.json
index 625eab34..548cdbaa 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-baremetal-1",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1e.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1e.json
index 89acc13f..ddb166c2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1e.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1e.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-baremetal-1e",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2.json
index a75a344b..b13ffcf8 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-baremetal-2",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2a.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2a.json
index b99e555f..81f8b3d8 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2a.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2a.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-baremetal-2a",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-3.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-3.json
index 49a9bc7b..e4c235db 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-3.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-3.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-baremetal-3",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-s.json
index 781052c3..df2b761c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-baremetal-s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c1.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c1.json
index b5b76cfb..b95a40dd 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c1.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c1.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-c1",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.large.arm.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.large.arm.json
index a7f1124f..719b9797 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.large.arm.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.large.arm.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-c2.large.arm",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.medium.x86.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.medium.x86.json
index 1a868acc..0c4c722c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.medium.x86.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.medium.x86.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-c2.medium.x86",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2l.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2l.json
index aa0cb58d..a5027320 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2l.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2l.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-c2l",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2m.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2m.json
index d2d4d81d..16f17b73 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2m.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2m.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-c2m",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2s.json
index a9aa6ab5..70b6922f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-c2s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-l.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-l.json
index a09bf48c..dccd8f15 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-l.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-l.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-dev1-l",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-m.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-m.json
index d7d762c4..19387d2b 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-m.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-m.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-dev1-m",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-s.json
index 15585dd5..d3e97df5 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-dev1-s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-xl.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-xl.json
index 9988fb06..97d260a4 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-xl.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-xl.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-dev1-xl",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-g2.large.x86.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-g2.large.x86.json
index 1c465bd9..b9b38b61 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-g2.large.x86.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-g2.large.x86.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-g2.large.x86",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-l.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-l.json
index 515f76a9..d47789ad 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-l.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-l.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-gp1-l",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-m.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-m.json
index ac279296..63dd46b2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-m.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-m.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-gp1-m",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-s.json
index 250330fe..fdaed1aa 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-gp1-s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xl.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xl.json
index ab518a47..41963996 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xl.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xl.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-gp1-xl",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xs.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xs.json
index 8d1ccf94..27aaf9c4 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xs.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xs.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-gp1-xs",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-m2.xlarge.x86.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-m2.xlarge.x86.json
index 40e30708..7916c59c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-m2.xlarge.x86.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-m2.xlarge.x86.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-m2.xlarge.x86",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-l.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-l.json
index 742d2d1e..3839c01a 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-l.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-l.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-pro1-l",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-m.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-m.json
index d6802038..b150acbc 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-m.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-m.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-pro1-m",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-s.json
index 94ab01b5..f8c7dbe1 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-pro1-s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xl.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xl.json
index 36c58072..ca14ddae 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xl.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xl.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-pro1-xl",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xs.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xs.json
index 1d466fce..a4570586 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xs.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xs.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-pro1-xs",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-render-s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-render-s.json
index f2fe8b08..63d978b0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-render-s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-render-s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-render-s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-l.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-l.json
index 152b6a83..4d6b91a0 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-l.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-l.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-start1-l",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-m.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-m.json
index b0e3f4c5..f0fe090d 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-m.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-m.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-start1-m",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-s.json
index 1300a982..d453eb5d 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-start1-s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-xs.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-xs.json
index 3a916bfa..335d0903 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-xs.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-xs.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-start1-xs",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1l.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1l.json
index 8a49df80..130217fa 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1l.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1l.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-vc1l",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1m.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1m.json
index 4adc8bb6..d0e99b9f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1m.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1m.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-vc1m",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1s.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1s.json
index ea7f9c05..d8c9ea9a 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1s.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1s.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-vc1s",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x2.xlarge.x86.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x2.xlarge.x86.json
index 815704b5..c57e47f6 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x2.xlarge.x86.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x2.xlarge.x86.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-x2.xlarge.x86",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-120gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-120gb.json
index 7ed7e416..e00e127c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-120gb.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-120gb.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-x64-120gb",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-15gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-15gb.json
index 6e2a4809..044abdca 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-15gb.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-15gb.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-x64-15gb",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-30gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-30gb.json
index e7e037cf..5246e356 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-30gb.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-30gb.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-x64-30gb",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-60gb.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-60gb.json
index 35934ee7..3426aefd 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-60gb.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-60gb.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "packet-x64-60gb",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-115.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-115.json
index ad0ed53d..fe0b40a5 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-115.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-115.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-115",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-116.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-116.json
index 198e73d2..66edb867 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-116.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-116.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-116",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-117.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-117.json
index f41bb4b3..064414cd 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-117.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-117.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-117",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-118.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-118.json
index 3fe6840d..1f00be5a 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-118.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-118.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-118",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-201.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-201.json
index fbeecd03..275f75a9 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-201.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-201.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-201",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-202.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-202.json
index ec20187b..9e70d23f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-202.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-202.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-202",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-203.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-203.json
index 1d2f3682..5513abf5 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-203.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-203.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-203",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-204.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-204.json
index 0016656c..9d2bcbae 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-204.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-204.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-204",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-205.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-205.json
index a48b9328..556a728c 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-205.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-205.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-205",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-206.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-206.json
index 868a1b4e..823d5289 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-206.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-206.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-206",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-207.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-207.json
index db97b523..4289b02d 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-207.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-207.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-207",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-208.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-208.json
index bd12d594..76ac3bdb 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-208.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-208.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-208",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-87.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-87.json
index 8d3edfc9..8a473da2 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-87.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-87.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-87",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-88.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-88.json
index bbfc70b0..44335301 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-88.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-88.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-88",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-89.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-89.json
index 81feb4e2..c2be7e85 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-89.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-89.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-89",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-90.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-90.json
index 8fcf986f..48c86619 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-90.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-90.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-90",
     "creationTimestamp": null,
diff --git a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-91.json b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-91.json
index f46917db..5a97e22f 100755
--- a/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-91.json
+++ b/data/json/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-91.json
@@ -1,6 +1,6 @@
 {
   "kind": "MachineType",
-  "apiVersion": "cloud.bytebuilders.dev/v1alpha1",
+  "apiVersion": "cloud.appscode.com/v1alpha1",
   "metadata": {
     "name": "vultr-91",
     "creationTimestamp": null,
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/aws.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/aws.yaml
index c38c0adc..580c1fe9 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/aws.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/aws.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: CloudProvider
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/azure.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/azure.yaml
index 4d295209..9ea64b6e 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/azure.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/azure.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: CloudProvider
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/digitalocean.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/digitalocean.yaml
index d021a819..e1303fe5 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/digitalocean.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/digitalocean.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: CloudProvider
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/gce.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/gce.yaml
index ee054964..d6fd7346 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/gce.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/gce.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: CloudProvider
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/linode.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/linode.yaml
index b760b08d..0519c903 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/linode.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/linode.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: CloudProvider
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/packet.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/packet.yaml
index 1f05ec30..e711f1f7 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/packet.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/packet.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: CloudProvider
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/scaleway.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/scaleway.yaml
index abac9944..a88ae541 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/scaleway.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/scaleway.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: CloudProvider
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/vultr.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/vultr.yaml
index a0d41efa..172f629f 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/vultr.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/cloudproviders/vultr.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: CloudProvider
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.2xlarge.yaml
index f6f1bcb3..7d053c51 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.2xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.2xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.4xlarge.yaml
index be5c4bed..b74d027d 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.4xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.4xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.large.yaml
index 696fdfb3..6ed263c4 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.large.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.large.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.medium.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.medium.yaml
index 17840609..ffbbcf24 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.medium.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.medium.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.xlarge.yaml
index 2fd69250..04cbcbe7 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-a1.xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.medium.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.medium.yaml
index a1315054..cd5e74d8 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.medium.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.medium.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.xlarge.yaml
index f35d6fe0..e853a326 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c1.xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.2xlarge.yaml
index 5d7be650..1bfc2061 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.2xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.2xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.4xlarge.yaml
index 9ea2b0ac..d7124784 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.4xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.4xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.8xlarge.yaml
index 3fc9c5c5..8716c6b3 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.8xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.8xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.large.yaml
index 6282936c..38884015 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.large.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.large.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.xlarge.yaml
index 8f67918f..c67888e2 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c3.xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.2xlarge.yaml
index 189d881d..87e072dc 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.2xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.2xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.4xlarge.yaml
index c8dae3d4..be931958 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.4xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.4xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.8xlarge.yaml
index b4bb5d61..44a32abf 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.8xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.8xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.large.yaml
index 53563344..551c19e7 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.large.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.large.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.xlarge.yaml
index 2024921e..47dd64ce 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c4.xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.18xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.18xlarge.yaml
index b067a5dd..fab9cb93 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.18xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.18xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.2xlarge.yaml
index eb84933d..e7438316 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.2xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.2xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.4xlarge.yaml
index c44c8c1d..62319267 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.4xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.4xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.9xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.9xlarge.yaml
index a6669c04..f1737943 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.9xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.9xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.large.yaml
index 61d7504d..e29d6701 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.large.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.large.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.xlarge.yaml
index 5c6b8120..fd1e89a6 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5.xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.18xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.18xlarge.yaml
index ed60cd3d..9d4faf12 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.18xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.18xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.2xlarge.yaml
index 5a7923c7..79c114fe 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.2xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.2xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.4xlarge.yaml
index 94e23a05..55833791 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.4xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.4xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.9xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.9xlarge.yaml
index 8387502c..fc0b1efb 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.9xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.9xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.large.yaml
index 4b225716..759b21ad 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.large.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.large.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.xlarge.yaml
index bc7e521b..6db70627 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.xlarge.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5d.xlarge.yaml
@@ -1,4 +1,4 @@
-apiVersion:
cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.18xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.18xlarge.yaml index 67848906..a09e7bff 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.18xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.18xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.2xlarge.yaml index 86abd3c6..b9b74bdc 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.4xlarge.yaml index f1d1c6b8..9b74829f 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.9xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.9xlarge.yaml index 92fdad2c..88b6ec9d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.9xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.9xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.large.yaml index 16d103cc..f57b43ee 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.xlarge.yaml index fa6856fb..47ed9ff0 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-c5n.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cc2.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cc2.8xlarge.yaml index ad612e39..acf84851 100755 --- 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cc2.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cc2.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cr1.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cr1.8xlarge.yaml index d0a0eb71..43670ee6 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cr1.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-cr1.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.2xlarge.yaml index 37e006b9..06e1242e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.4xlarge.yaml index 378a9efe..2fd212a8 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.8xlarge.yaml index d7a7506c..0283ab38 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.xlarge.yaml index aed45ed0..5cdeae43 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-d2.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.16xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.16xlarge.yaml index 76038da3..85fcc3fc 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.16xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.16xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.2xlarge.yaml index a3a89323..1be6d8a2 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.4xlarge.yaml index 03d7a9c6..ed48f0c1 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-f1.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.2xlarge.yaml index ce340d7c..dbe00f4c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.8xlarge.yaml index 03caefac..66f7e182 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g2.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.16xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.16xlarge.yaml index 80f464bc..7f919865 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.16xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.16xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.4xlarge.yaml index 6e1361e6..ca7709b8 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.8xlarge.yaml index e3501c56..26ee1ddd 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.8xlarge.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3s.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3s.xlarge.yaml index 3f93ddb3..3a72de85 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3s.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-g3s.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.16xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.16xlarge.yaml index 03ffa3be..e6338eea 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.16xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.16xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.2xlarge.yaml index 25ce4b5e..f315f339 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.4xlarge.yaml index cc12351b..53a02e5a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.8xlarge.yaml index 9f43c0cc..91163561 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-h1.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-hs1.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-hs1.8xlarge.yaml index 8b609453..3d677889 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-hs1.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-hs1.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.2xlarge.yaml 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.2xlarge.yaml index ab120a90..20171031 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.4xlarge.yaml index 49e27f11..587df83c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.8xlarge.yaml index 9b68bae1..d3808c04 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.xlarge.yaml index 467bb80d..7f104414 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i2.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.16xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.16xlarge.yaml index dcc5a92b..17071250 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.16xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.16xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.2xlarge.yaml index cc62a56c..aab8c321 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.4xlarge.yaml index 81f0aa5c..84a4d6de 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 
+apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.8xlarge.yaml index ed0e54da..30fd3b76 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.large.yaml index b6d54f66..099930bf 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.metal.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.metal.yaml index 10f878c2..c846f3ce 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.metal.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.metal.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.xlarge.yaml index bdff8630..29be8ed4 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-i3.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.large.yaml index 3b9de3a8..ad6238fa 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.medium.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.medium.yaml index 0a6ae846..dabae3b9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.medium.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.medium.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.small.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.small.yaml index 3c4a2226..57bd6aa9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.small.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.small.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.xlarge.yaml index f5ba7cf1..2ba6d1fe 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m1.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.2xlarge.yaml index b5d21517..01f2df20 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.4xlarge.yaml index a7b6bd81..77202cd2 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.xlarge.yaml index a7f51afe..26a01175 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m2.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.2xlarge.yaml index 3429754b..72d9c10a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.large.yaml index ae12f8b4..56b39f34 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.medium.yaml 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.medium.yaml index 22da6270..6cf99c8d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.medium.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.medium.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.xlarge.yaml index 6b5127cb..db05d9da 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m3.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.10xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.10xlarge.yaml index 59913574..02f20488 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.10xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.10xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.16xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.16xlarge.yaml index 3e524111..6ad611d6 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.16xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.16xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.2xlarge.yaml index a398c97d..1ecce10f 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.4xlarge.yaml index 8e2bf7fc..41c580da 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.large.yaml index f9291e44..7c5a5174 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: 
cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.xlarge.yaml index a2426775..6531f5ad 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m4.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.12xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.12xlarge.yaml index d37195b3..e7e1cc94 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.12xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.12xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.24xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.24xlarge.yaml index 5851ed76..a7c80dba 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.24xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.24xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.2xlarge.yaml index 2586f16c..5acd486a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.4xlarge.yaml index 5d966cd9..c47b0471 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.large.yaml index 1b1ac33d..f375cc60 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.xlarge.yaml index a50e83a7..9b7055aa 100755 --- 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.12xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.12xlarge.yaml index 53461337..03ba2212 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.12xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.12xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.24xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.24xlarge.yaml index bc08a600..a04b62b9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.24xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.24xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.2xlarge.yaml index 19c93565..a30d0bdd 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.4xlarge.yaml index 98e9b96a..04643f14 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.large.yaml index d22619d7..2bd7aa64 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.xlarge.yaml index daa891d3..24dd5e06 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5a.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.12xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.12xlarge.yaml index 75398f61..a4b3a989 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.12xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.12xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.24xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.24xlarge.yaml index 06ea0970..93edeb10 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.24xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.24xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.2xlarge.yaml index f8ca3705..17fcd851 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.4xlarge.yaml index 04eaa754..d34ac94d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.large.yaml index 4efc8242..b8b46b3d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.xlarge.yaml index 83b07b27..167a18bf 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-m5d.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.16xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.16xlarge.yaml index ea76f6ec..1e6fc77f 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.16xlarge.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.16xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.8xlarge.yaml index 47cfb8fb..5bdb7616 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.xlarge.yaml index b1055c2f..c450f75c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p2.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.16xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.16xlarge.yaml index a20bddbe..2ea47bcf 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.16xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.16xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.2xlarge.yaml index 1bf543fb..efe98ef3 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.8xlarge.yaml index a1ced42c..d1eec7e5 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-p3.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.2xlarge.yaml index bb3b2320..1f3fa568 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.4xlarge.yaml 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.4xlarge.yaml index 371a7045..50124a0d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.8xlarge.yaml index e5ef9289..490c5f99 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.large.yaml index 77fbd7a0..20bba3a7 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.xlarge.yaml index 8630bb76..f92684fd 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r3.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.16xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.16xlarge.yaml index be8189c7..740c13d0 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.16xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.16xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.2xlarge.yaml index b8001c3a..c9812c98 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.4xlarge.yaml index 8c4841e3..6cbfd630 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: 
cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.8xlarge.yaml index 59294929..b769d88b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.large.yaml index ecc0025a..cdd5029a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.xlarge.yaml index f214c85c..038fc065 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r4.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.12xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.12xlarge.yaml index e38d62ad..d191c4e9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.12xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.12xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.24xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.24xlarge.yaml index 49227292..526beb10 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.24xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.24xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.2xlarge.yaml index 975b6130..e42e490b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.4xlarge.yaml index 5a978869..ff8bff2f 100755 --- 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.large.yaml index 87265ff6..f4b17516 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.xlarge.yaml index 249a9650..898ae939 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.12xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.12xlarge.yaml index 7cd80f82..78bad518 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.12xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.12xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.24xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.24xlarge.yaml index 094351a9..a630f083 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.24xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.24xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.2xlarge.yaml index f8bef42f..f00098d9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.4xlarge.yaml index a7f5592b..9b17568c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.large.yaml index c22aaf74..795f1c82 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.xlarge.yaml index 6d3e1dda..97bb14fc 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5a.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.12xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.12xlarge.yaml index 5903ab00..7f4c6261 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.12xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.12xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.24xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.24xlarge.yaml index 03a8a348..e717d195 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.24xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.24xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.2xlarge.yaml index c60e56bf..43fcfb2c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.4xlarge.yaml index fe26c104..cb6ae231 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.large.yaml index 49db3a77..1f26488c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.large.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.xlarge.yaml index 8fab64d3..bea8d41b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-r5d.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t1.micro.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t1.micro.yaml index db0e2ca6..0fcf3bd1 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t1.micro.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t1.micro.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.2xlarge.yaml index 5940591a..c42f24ef 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.large.yaml index 2f32f93d..3f5bf87f 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.medium.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.medium.yaml index d5347bb0..dcdf6bdc 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.medium.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.medium.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.micro.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.micro.yaml index dadbc3a2..0017d48e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.micro.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.micro.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.nano.yaml 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.nano.yaml index fd82c779..c450cef7 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.nano.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.nano.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.small.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.small.yaml index 3fd60c69..feaa16bb 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.small.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.small.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.xlarge.yaml index 62a7126b..50c388f4 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t2.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.2xlarge.yaml index ddf77c3c..c95ed16e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.large.yaml index a8bfff9c..6dc8e9fc 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.medium.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.medium.yaml index e93f556b..f57460ed 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.medium.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.medium.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.micro.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.micro.yaml index d8812ba7..a58c9dc3 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.micro.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.micro.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: 
MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.nano.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.nano.yaml index a70b7fe1..e8d3e216 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.nano.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.nano.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.small.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.small.yaml index f49924d4..c30e4c17 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.small.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.small.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.xlarge.yaml index d99c4d35..542b127b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-t3.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-12tb1.metal.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-12tb1.metal.yaml index 0833bfa7..ebca3813 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-12tb1.metal.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-12tb1.metal.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-6tb1.metal.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-6tb1.metal.yaml index 3ca30cd2..c80e20a9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-6tb1.metal.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-6tb1.metal.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-9tb1.metal.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-9tb1.metal.yaml index dd028560..d3e19cc6 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-9tb1.metal.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-u-9tb1.metal.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.16xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.16xlarge.yaml index 38a4f765..006ff653 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.16xlarge.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.16xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.32xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.32xlarge.yaml index 002fc273..ceb50a8d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.32xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1.32xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.16xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.16xlarge.yaml index bce6f5e6..d7e8e884 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.16xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.16xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.2xlarge.yaml index a5196134..070baa58 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.32xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.32xlarge.yaml index af4d921e..2d8ee825 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.32xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.32xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.4xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.4xlarge.yaml index cfd14c85..c2ffed51 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.4xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.4xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.8xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.8xlarge.yaml index 44b99b58..48ef1a0e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.8xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.8xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.xlarge.yaml index b8398eb8..fc35947e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-x1e.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.12xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.12xlarge.yaml index f9f58eeb..8e392cfa 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.12xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.12xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.2xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.2xlarge.yaml index d223c21d..983b891e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.2xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.2xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.3xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.3xlarge.yaml index 28d76758..f71ce9c0 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.3xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.3xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.6xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.6xlarge.yaml index 37b09d42..a5b2d6ef 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.6xlarge.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.6xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.large.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.large.yaml index 00196144..fbbee41f 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.large.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.large.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.xlarge.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.xlarge.yaml index 3a157490..11020f11 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.xlarge.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/aws-z1d.xlarge.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a0.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a0.yaml index d0f25b34..b7494f3b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a0.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a0.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a1.yaml index ccc67c54..15f439e5 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a1.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a1.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a2.yaml index 8a5420bd..f91552b8 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a3.yaml index abd52564..32bd12be 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a4.yaml index 8385f00e..be100f8e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a4.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-basic-a4.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a0.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a0.yaml index 6e480fe2..60b3479b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a0.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a0.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1-v2.yaml 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1-v2.yaml index 438fdd61..d979a40a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1.yaml index f98f428a..c2a1d643 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a1.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a10.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a10.yaml index a7179d18..c1bde1c6 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a10.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a10.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a11.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a11.yaml index 2e2cc466..a2b34209 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a11.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a11.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2-v2.yaml index 4b543c18..5a152a5b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2.yaml index d3e7cc01..fbbf8e4a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2m-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2m-v2.yaml index 130447ca..1b9106bd 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2m-v2.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a2m-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a3.yaml index 0e57d7ef..ac59b584 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4-v2.yaml index 428f6dca..226964df 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4.yaml index f1fa6ca5..95bcde39 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4m-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4m-v2.yaml index b5a76b9f..ba3a44c6 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4m-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a4m-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a5.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a5.yaml index dc92d5f9..cc0ad5fd 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a5.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a5.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a6.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a6.yaml index 94be0bf8..8ff4f090 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a6.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a6.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a7.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a7.yaml index d1a5585c..a4b85b10 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a7.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a7.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8-v2.yaml index 0737d2e1..b6368637 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8.yaml index 193595fe..1fe08b62 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8m-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8m-v2.yaml index 808e40bb..3c41627e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8m-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a8m-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a9.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a9.yaml index ada730d8..0a509466 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a9.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-a9.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ls.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ls.yaml index ec5793c8..c310411a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ls.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ls.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ms.yaml index 30e80c80..6fdd5520 100755 --- 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1s.yaml index 3ac88236..7d2a5689 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b1s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2ms.yaml index ef0e4354..4d70de4a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2s.yaml index e65a3337..22f93baa 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b2s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b4ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b4ms.yaml index 63b6b99e..c53f7645 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b4ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b4ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b8ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b8ms.yaml index 7a68d729..551d296b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b8ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-b8ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1-v2.yaml index 9378cde3..ff0e20fc 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 
+apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1.yaml index 8483d556..dc0d8a47 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d1.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2-promo.yaml index bb5db6ba..8e33cbad 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2.yaml index 88f8f15e..949d2ed2 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11.yaml index 9d8bece4..53ac98fb 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d11.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2-promo.yaml index 0fcd77c9..03b0c70a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2.yaml index 54ca7eba..d3f3362d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12.yaml index cf80162f..ab83c2d8 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d12.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2-promo.yaml index 222b004a..523d0053 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2.yaml index 36e6c2f0..2f46588c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13.yaml index 1ffcd3c3..59b5f653 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d13.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2-promo.yaml index 4b2681da..d01d8d1d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2.yaml index d6ef930c..079a89e2 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14.yaml 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14.yaml index ea22d443..7dcf3355 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d14.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d15-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d15-v2.yaml index bbee33ee..2a083acc 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d15-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d15-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16-v3.yaml index f26ee155..b07a378f 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16-v3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16s-v3.yaml index 971e6831..3d742197 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16s-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d16s-v3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2-promo.yaml index 527bf908..3e0c57ff 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2.yaml index e5178cc7..0f92a95e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v3.yaml index 5a515b4e..89a230cb 100755 --- 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2-v3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2.yaml index 8a238eb4..a29b69b0 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2s-v3.yaml index 2b9d768e..bd0b3338 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2s-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d2s-v3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2-promo.yaml index f3922566..f6b785ca 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2.yaml index d0764d5f..f1e75c6b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3.yaml index 7d2435e7..57fbeccb 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32-v3.yaml index 7e2e7f8b..4acc182e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32-v3.yaml @@ -1,4 +1,4 @@ 
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32s-v3.yaml
index 1ad2ae9d..3d033d1d 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d32s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2-promo.yaml
index 31ddf6e2..e1588058 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2.yaml
index c97d6780..b07ca0ef 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v3.yaml
index 79cdced0..9c174927 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4.yaml
index c02d7680..00c4aeb8 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4s-v3.yaml
index 101d00b4..d919153b 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d4s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2-promo.yaml
index 09ccc420..767347f5 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2.yaml
index 59dedbe0..6e928d3d 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d5-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64-v3.yaml
index e0d02155..810169d5 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64s-v3.yaml
index d9047275..1f8cb3d4 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d64s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8-v3.yaml
index 05e3bb82..7ad50cab 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8s-v3.yaml
index 3ff14747..c231af85 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-d8s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc2s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc2s.yaml
index 18d3fdb3..659184ab 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc2s.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc2s.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc4s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc4s.yaml
index bae9796f..e3acfb40 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc4s.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-dc4s.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1-v2.yaml
index f4597796..4f4434db 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1.yaml
index ef03fc26..c4ad3194 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds1.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-1-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-1-v2.yaml
index 4e9fcf7e..b004474f 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-1-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-1-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2-promo.yaml
index 663adc15..ed9ffe77 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2.yaml
index 5a2e2d5e..b8b2c341 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11.yaml
index 32736f11..d18e3599 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds11.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-1-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-1-v2.yaml
index 562db05c..03d6c645 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-1-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-1-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-2-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-2-v2.yaml
index c1d09f46..23a60b38 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-2-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-2-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2-promo.yaml
index cff49209..a299bd64 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2.yaml
index df606eb2..ba387dfe 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12.yaml
index b7abc4d5..52a585b4 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds12.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-2-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-2-v2.yaml
index 334f7d3a..d961b0ee 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-2-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-2-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-4-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-4-v2.yaml
index f6bd1ff5..8373e3ab 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-4-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-4-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2-promo.yaml
index ea0b39df..ef92dcc6 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2.yaml
index 672e259f..a6016895 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13.yaml
index 992c0b9b..53db7386 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds13.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-4-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-4-v2.yaml
index fdfc5b50..f698bf3f 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-4-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-4-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-8-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-8-v2.yaml
index 3de08d38..622af711 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-8-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-8-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2-promo.yaml
index a0f77484..24a34a20 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2.yaml
index 20e52bc1..80bcce9d 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14.yaml
index f98851d4..5e1ab3a0 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds14.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds15-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds15-v2.yaml
index fcae9042..f486bd79 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds15-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds15-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2-promo.yaml
index 55221ad7..9d948d6e 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2.yaml
index 097a4bb7..bb00d6df 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2.yaml
index 129813d6..e918229a 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2-promo.yaml
index 8ed45c01..13c7905b 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2.yaml
index a14da48f..50b0a210 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3.yaml
index 48145353..c3be522d 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2-promo.yaml
index b2087126..02c032fb 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2.yaml
index 5325bb13..1a7fd117 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4.yaml
index 9d2e6f38..c9fe170f 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds4.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2-promo.yaml
index 02a901e9..75d55252 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2.yaml
index 9ba2e7ea..62582c14 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-ds5-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-4s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-4s-v3.yaml
index d36d5751..a6ce5a1a 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-4s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-4s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-8s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-8s-v3.yaml
index 8b7f3ac3..89952844 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-8s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-8s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-v3.yaml
index 94022fc5..197628d4 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16s-v3.yaml
index 13d95e75..5ea9390c 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e16s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2-v3.yaml
index ef343bc7..42b4dc80 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20-v3.yaml
index 0e0bc28c..d4fa61de 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20s-v3.yaml
index ddf03abb..31b49d53 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e20s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2s-v3.yaml
index ed725ca8..9f7f8cb3 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e2s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-16s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-16s-v3.yaml
index d524fa19..a87f60af 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-16s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-16s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-8s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-8s-v3.yaml
index fd9a452d..c0c4ad9a 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-8s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-8s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-v3.yaml
index 44394172..6da91820 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32s-v3.yaml
index 37611a25..bf9a11df 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e32s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-2s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-2s-v3.yaml
index df94de9d..82866f7d 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-2s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-2s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-v3.yaml
index 8a99bbb2..33a6e548 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4s-v3.yaml
index 488a699a..098f3352 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e4s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-16s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-16s-v3.yaml
index b9069291..79207b70 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-16s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-16s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-32s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-32s-v3.yaml
index 6101790e..7b7e7bd3 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-32s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-32s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-v3.yaml
index 3a208e5e..5768ddf7 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64i-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64i-v3.yaml
index b08fd08f..261e3882 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64i-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64i-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64is-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64is-v3.yaml
index 588469f9..91ad96c3 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64is-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64is-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64s-v3.yaml
index 02cc937b..a331ff11 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e64s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-2s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-2s-v3.yaml
index d4d4923f..1e6e65bc 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-2s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-2s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-4s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-4s-v3.yaml
index 45750784..bd7573a1 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-4s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-4s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-v3.yaml
index 4451dafa..1ef6c13c 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8s-v3.yaml
index 6d02349e..d5d961d7 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8s-v3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-e8s-v3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1.yaml
index 9fe24d60..0aed5939 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16.yaml
index 48230367..9a273203 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s-v2.yaml
index 5da7e138..4c8882a7 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s.yaml
index ebb34acc..47f6448b 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f16s.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1s.yaml
index 3f4a1546..11131949 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1s.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f1s.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2.yaml
index cb5af6ae..2a9f09aa 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s-v2.yaml
index a121a2a0..9e4968b6 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s.yaml
index fd18869b..8aa4799d 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f2s.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f32s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f32s-v2.yaml
index 8b2e7df9..ff49317a 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f32s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f32s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4.yaml
index 289ca78a..97918aa7 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s-v2.yaml
index b90b99a4..1609a321 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s.yaml
index 791a8c01..d7dd376a 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f4s.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f64s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f64s-v2.yaml
index 760775e0..eb70548c 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f64s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f64s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f72s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f72s-v2.yaml
index 2c642b62..12464597 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f72s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f72s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8.yaml
index d2259ae8..159e5aad 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s-v2.yaml
index e521192a..3ea38267 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s.yaml
index dcce0dea..1f51e51a 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-f8s.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g1.yaml
index 55a8b6db..93d00d90 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g1.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g1.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g2.yaml
index af8a4dd7..0a3990cf 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g3.yaml
index 6622bda1..0f08b20c 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g4.yaml
index e98616fc..85fe77cc 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g4.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g4.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g5.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g5.yaml
index ce8826a2..9586ab5d 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g5.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-g5.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs1.yaml
index 96b9c5ea..e3d4b1f4 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs1.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs1.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs2.yaml
index 0d1008fd..2f8ccbc3 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs3.yaml
index 51bb026a..78f7c085 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs3.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs3.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-4.yaml
index a44be626..a77d6176 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-4.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-4.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-8.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-8.yaml
index 1e2b3616..3c1f307e 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-8.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4-8.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4.yaml
index c6a507a4..39b15a96 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs4.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-16.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-16.yaml
index f8697a1c..d042b763 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-16.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-16.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-8.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-8.yaml
index 1236e623..0242b190 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-8.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5-8.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5.yaml
index 528b4ee6..8e27b403 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-gs5.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16-promo.yaml
index e7eb7285..84288af1 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16.yaml
index 3398680d..2bfc19d8 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m-promo.yaml
index 7bd88cf8..f38d87a0 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m.yaml
index 00f57e0a..8c6a2934 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16m.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr-promo.yaml
index 5ee4d28d..38eb3eee 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr.yaml
index 976d5aa6..bd6a7dc7 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16mr.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r-promo.yaml
index 31d0d417..fb46b7b2 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r.yaml
index fc4e6d87..6161e1dc 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h16r.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8-promo.yaml
index e13e4877..e65ebd44 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8.yaml
index 77299e24..a8e04b8f 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m-promo.yaml
index 1b37cba3..d9075fac 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m-promo.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m-promo.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m.yaml
index f0cf0264..dec54256 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-h8m.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hb60rs.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hb60rs.yaml
index 1a45f639..cc3174b2 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hb60rs.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hb60rs.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hc44rs.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hc44rs.yaml
index c5b8d482..15d707aa 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hc44rs.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-hc44rs.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s-v2.yaml
index b60c17d8..d8361a3b 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s.yaml
index 5355ef3b..9afe60c7 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l16s.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s-v2.yaml
index cc567a46..8fe5075d 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s.yaml
index d9c85692..2bb004b4 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l32s.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l4s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l4s.yaml
index 5861f9d2..e08f63d8 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l4s.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l4s.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l64s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l64s-v2.yaml
index 5494da99..0b94ab90 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l64s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l64s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l80s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l80s-v2.yaml
index a933dbac..b0484b4e 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l80s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l80s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s-v2.yaml
index 497353fd..614d210e 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s-v2.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s-v2.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s.yaml
index 6f01c34a..945e1165 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-l8s.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-32ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-32ms.yaml
index 3befc60d..3ced106c 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-32ms.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-32ms.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-64ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-64ms.yaml
index 5696a80d..c47305d4 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-64ms.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128-64ms.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128.yaml
index 18c5f7e1..63e3edcb 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128m.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128m.yaml
index c6ef9084..aba10258 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128m.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128m.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128ms.yaml
index 507659e8..fa21d6dd 100755
--- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128ms.yaml
+++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128ms.yaml
@@ -1,4 +1,4 @@
-apiVersion: cloud.bytebuilders.dev/v1alpha1
+apiVersion: cloud.appscode.com/v1alpha1
 kind: MachineType
 metadata:
   creationTimestamp: null
diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128s.yaml
index 2d2b6265..48736bbe 100755
---
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m128s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-4ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-4ms.yaml index ea27cb89..88cf94c1 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-4ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-4ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-8ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-8ms.yaml index e37ca658..a222a434 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-8ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16-8ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16ms.yaml index db4d551e..684fc8a0 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m16ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208ms-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208ms-v2.yaml index 19400217..42122c9a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208ms-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208ms-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208s-v2.yaml index 66427156..79f89a44 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208s-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m208s-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-16ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-16ms.yaml index 29d5c227..a74c01ef 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-16ms.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-16ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-8ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-8ms.yaml index 308dbf3f..e74103f7 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-8ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32-8ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ls.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ls.yaml index c37cecd6..77312dc1 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ls.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ls.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ms.yaml index 53f59747..05d2b3ec 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ts.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ts.yaml index 026f7b59..5a295f74 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ts.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m32ts.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-16ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-16ms.yaml index f596ccc6..090c5be1 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-16ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-16ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-32ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-32ms.yaml index 0479c45d..4ba2911b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-32ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64-32ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: 
cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64.yaml index dfe391cc..6e60a5ea 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ls.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ls.yaml index 1c3ee676..b53de3d9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ls.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ls.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64m.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64m.yaml index 8461fb38..f4dc1912 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64m.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64m.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ms.yaml index c899ae86..6c319b9e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64s.yaml index 6ff93907..9d6923b9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m64s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-2ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-2ms.yaml index 48d40edb..bf897699 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-2ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-2ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-4ms.yaml 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-4ms.yaml index 18d8cd09..470c747d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-4ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8-4ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8ms.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8ms.yaml index 84a604ab..dfea5fe2 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8ms.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-m8ms.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12-promo.yaml index fe88bed2..60c30fc5 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12.yaml index 94bbea24..9c8353ac 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v2.yaml index feb4b24e..f8c46f21 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v3.yaml index 75ef64af..122e549a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc12s-v3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24-promo.yaml index 3a837b2d..993a7f54 100755 --- 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24.yaml index c64f5cf9..c2624727 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r-promo.yaml index 0627920e..37a0d62b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r.yaml index ba117143..a06bff4e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24r.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v2.yaml index 404aaa95..b54a2f33 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v3.yaml index fc047acd..6a6dd421 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24rs-v3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v2.yaml index 0c313086..58face92 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v2.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v3.yaml index fd454331..e3ae82d0 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc24s-v3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6-promo.yaml index 26c78d57..aaaca3f6 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6.yaml index 8d94f704..8521a696 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v2.yaml index 817cfd01..b6ec1a4f 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v3.yaml index d6046076..90e9f399 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nc6s-v3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd12s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd12s.yaml index b1a82ae6..506975f4 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd12s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd12s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: 
cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24rs.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24rs.yaml index fb58660a..8237a419 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24rs.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24rs.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24s.yaml index 071bf650..7120d349 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd24s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd6s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd6s.yaml index 99c95e50..9c03d875 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd6s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nd6s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12-promo.yaml index 07c3be47..5c937b4d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12.yaml index e6176a18..d7f06538 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v2.yaml index 7a93c800..eaa90378 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v3.yaml 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v3.yaml index c304341b..8234440e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv12s-v3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24-promo.yaml index fdc2efe9..5ba49eb2 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24.yaml index 14791e67..955c4fea 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v2.yaml index 7f195a91..b223b653 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v3.yaml index 21bca07c..037a8044 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv24s-v3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv48s-v3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv48s-v3.yaml index 4e58bdb3..ed65ebb6 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv48s-v3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv48s-v3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6-promo.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6-promo.yaml index 14a9d24f..92a85b79 100755 --- 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6-promo.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6-promo.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6.yaml index d3691e4b..e801bf40 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6s-v2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6s-v2.yaml index 643b2c8b..5292e110 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6s-v2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-nv6s-v2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-pb6s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-pb6s.yaml index e013cabf..025ab7ab 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-pb6s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/azure-standard-pb6s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-16gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-16gb.yaml index 940cacb5..32c58a72 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-16gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-16gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-1gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-1gb.yaml index c4a801e3..6f54a333 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-1gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-1gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-2gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-2gb.yaml index 5dffb3bb..ece0cc0b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-2gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-2gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 
+apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-4gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-4gb.yaml index 5a069e70..37552572 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-4gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-4gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-512mb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-512mb.yaml index 9145d9be..a0ff9b6a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-512mb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-512mb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-8gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-8gb.yaml index f5722cbd..bcc268c9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-8gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-8gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-2.yaml index 2593fcad..34b96b6b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-4.yaml index ed5ed203..df06b380 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-4.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-c-4.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-2vcpu-8gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-2vcpu-8gb.yaml index 7d530a32..56ec0945 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-2vcpu-8gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-2vcpu-8gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-4vcpu-16gb.yaml 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-4vcpu-16gb.yaml index 7773f401..283c9ef3 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-4vcpu-16gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-g-4vcpu-16gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-2vcpu-8gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-2vcpu-8gb.yaml index 220902dc..15729b6c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-2vcpu-8gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-2vcpu-8gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-4vcpu-16gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-4vcpu-16gb.yaml index c5e6af18..b190afa6 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-4vcpu-16gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-gd-4vcpu-16gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-1gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-1gb.yaml index da6d32bf..56e34e54 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-1gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-1gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-2gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-2gb.yaml index 008b3bc8..47290b45 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-2gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-2gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-3gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-3gb.yaml index 85b7adf2..0a7e08f7 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-3gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-1vcpu-3gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-2gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-2gb.yaml index 
93fc5db1..b252d8bd 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-2gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-2gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-4gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-4gb.yaml index 6fab113a..a68e22eb 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-4gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-2vcpu-4gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-3vcpu-1gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-3vcpu-1gb.yaml index 1cd4d267..ddee148c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-3vcpu-1gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-3vcpu-1gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-4vcpu-8gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-4vcpu-8gb.yaml index 4fe700af..5980233f 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-4vcpu-8gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-4vcpu-8gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-6vcpu-16gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-6vcpu-16gb.yaml index ef9d21ff..0e58e2a3 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-6vcpu-16gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/digitalocean-s-6vcpu-16gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-f1-micro.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-f1-micro.yaml index 17acf5bc..a920cb5a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-f1-micro.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-f1-micro.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-g1-small.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-g1-small.yaml index 824ee305..61fd370d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-g1-small.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-g1-small.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-16.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-16.yaml index 177f7aaf..0053b860 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-16.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-16.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-2.yaml index 383af4c0..bb8546d7 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-32.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-32.yaml index 1eca86d1..ab3ac014 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-32.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-32.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-4.yaml index dbb15fe8..8f913d5b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-4.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-4.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-64.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-64.yaml index cc6b87c0..2aa93f50 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-64.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-64.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-8.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-8.yaml index ff3e001e..4ee88609 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-8.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-8.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-96.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-96.yaml index 89ed5b7c..50aa88b2 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-96.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highcpu-96.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-16.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-16.yaml index d6fbb892..5f6286f8 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-16.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-16.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-2.yaml index a26f6b08..f2346f27 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-32.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-32.yaml index 26ebc6fa..2bf694df 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-32.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-32.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-4.yaml index af89c2ef..df199e0a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-4.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-4.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-64.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-64.yaml index 2bf6cfb5..ef0ff6ad 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-64.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-64.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-8.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-8.yaml index 6d110a75..0f0edb3d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-8.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-8.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-96.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-96.yaml index 61dbc5e3..7548e1f8 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-96.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-highmem-96.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-megamem-96.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-megamem-96.yaml index 25c4f6cb..01e1a137 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-megamem-96.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-megamem-96.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-1.yaml index 39b1010f..86e877ea 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-1.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-1.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-16.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-16.yaml index 56a2970b..a131ef62 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-16.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-16.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-2.yaml index 354534ef..8deafaa1 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-32.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-32.yaml index 2d6d95a8..11355dbc 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-32.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-32.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-4.yaml index 28cbc0ba..338b02a9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-4.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-4.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-64.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-64.yaml index 4e3c7b28..79c29b19 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-64.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-64.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-8.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-8.yaml index a5faef4c..38521091 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-8.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-8.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-96.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-96.yaml index b3a92d7f..9b705d7d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-96.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-standard-96.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-160.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-160.yaml index fee886d5..8ad22e8c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-160.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-160.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-40.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-40.yaml index 5ec5c0d8..4ec317e6 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-40.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-40.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-80.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-80.yaml index 39b0c01d..e5987395 100755 --- 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-80.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/gce-n1-ultramem-80.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-16.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-16.yaml index 6062cb4b..c10798b0 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-16.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-16.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-2.yaml index 7de2d9de..afcb2ac6 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-32.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-32.yaml index 01174966..78080bad 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-32.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-32.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-4.yaml index 377c59f4..a314c0cd 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-4.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-4.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-48.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-48.yaml index 0354c4e6..c3044689 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-48.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-48.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-8.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-8.yaml index ede9b53a..dbfabcf5 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-8.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-dedicated-8.yaml 
@@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-1.yaml index 5154d645..dad7b2e8 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-1.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-1.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-16.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-16.yaml index bc4edf05..5c5d3fa2 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-16.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-16.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-2.yaml index 388bbcc1..cd01e25a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-4.yaml index c60024e2..44582b4a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-4.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-4.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-8.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-8.yaml index 79106c3b..6b567281 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-8.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-highmem-8.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-nanode-1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-nanode-1.yaml index af37d2b5..dd176d58 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-nanode-1.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-nanode-1.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-1.yaml index 4075abf6..4b58df26 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-1.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-1.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-16.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-16.yaml index a042878c..28869bc5 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-16.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-16.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-2.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-2.yaml index ecbbd00a..9733276c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-20.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-20.yaml index 80f3c612..9a7585b0 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-20.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-20.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-24.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-24.yaml index c6e5e8f0..d6652325 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-24.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-24.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-32.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-32.yaml index 261aaac8..b057c8c0 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-32.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-32.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-4.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-4.yaml index 
2fd4d321..141b30b8 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-4.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-4.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-6.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-6.yaml index 1d9a39b6..449fccdb 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-6.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-6.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-8.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-8.yaml index 4eac95ff..2a545a77 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-8.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/linode-g6-standard-8.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-128gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-128gb.yaml index d1f5531e..b74d5d30 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-128gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-128gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-16gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-16gb.yaml index 52ff2ee8..a94c2a09 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-16gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-16gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-2gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-2gb.yaml index 4b6e99d8..015a162c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-2gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-2gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-32gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-32gb.yaml index e30f6a62..1cc4845c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-32gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-32gb.yaml @@ -1,4 +1,4 @@ -apiVersion: 
cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-4gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-4gb.yaml index 69b5371b..b6f5a7c9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-4gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-4gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-64gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-64gb.yaml index 2ee4f432..e649e78a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-64gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-64gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-8gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-8gb.yaml index 32801baa..6120c3ab 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-8gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-arm64-8gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-0.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-0.yaml index 83e06f61..8bf0512d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-0.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-0.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1.yaml index 96be8b06..1897e518 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1e.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1e.yaml index eb25076a..5c7c6ed2 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1e.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-1e.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2.yaml 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2.yaml index 99d19c5b..9986fa34 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2a.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2a.yaml index fe0d9639..b6c8596c 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2a.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-2a.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-3.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-3.yaml index 7623e17c..391da1a8 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-3.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-3.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-s.yaml index d17cdd5c..96c387cd 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-baremetal-s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c1.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c1.yaml index 25bb5ff8..04ca5b91 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c1.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c1.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.large.arm.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.large.arm.yaml index 04c42fff..e0d4c8da 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.large.arm.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.large.arm.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.medium.x86.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.medium.x86.yaml index d4f39f65..2f11c11a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.medium.x86.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2.medium.x86.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2l.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2l.yaml index 39439104..247acc8d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2l.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2l.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2m.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2m.yaml index aeb995b4..37ee3fe2 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2m.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2m.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2s.yaml index 85976d35..572eacb7 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-c2s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-l.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-l.yaml index 49f2a148..b2f32473 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-l.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-l.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-m.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-m.yaml index bac26ec6..bdb7c296 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-m.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-m.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-s.yaml index 20767484..108a4a45 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-xl.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-xl.yaml 
index e14aac13..6fb46572 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-xl.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-dev1-xl.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-g2.large.x86.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-g2.large.x86.yaml index 5e1fa13b..3279142f 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-g2.large.x86.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-g2.large.x86.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-l.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-l.yaml index 55d8d44b..9fc63531 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-l.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-l.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-m.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-m.yaml index f97e72e8..32a18173 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-m.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-m.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-s.yaml index f0c191a6..5127b649 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xl.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xl.yaml index 21425a24..febe60c3 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xl.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xl.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xs.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xs.yaml index c93b8568..f9c911eb 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xs.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-gp1-xs.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-m2.xlarge.x86.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-m2.xlarge.x86.yaml index ac378f55..0161444d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-m2.xlarge.x86.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-m2.xlarge.x86.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-l.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-l.yaml index e1ab3b5a..08ae7698 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-l.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-l.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-m.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-m.yaml index 8cb21d65..2bf1428f 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-m.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-m.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-s.yaml index 405bd963..496cd45a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xl.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xl.yaml index 71d079c2..3bea601a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xl.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xl.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xs.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xs.yaml index 3cb83c04..204c929b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xs.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-pro1-xs.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-render-s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-render-s.yaml index 1866ee56..5b248046 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-render-s.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-render-s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-l.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-l.yaml index 1a8c8ab4..a08e1378 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-l.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-l.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-m.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-m.yaml index 057038f4..95455ff9 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-m.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-m.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-s.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-s.yaml index 6dde53c8..b6d5493a 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-xs.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-xs.yaml index 7a3e8f9e..aca642e5 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-xs.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-start1-xs.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1l.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1l.yaml index 791ba180..6a1201c4 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1l.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1l.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1m.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1m.yaml index 9b56c7ec..6650c386 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1m.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1m.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1s.yaml 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1s.yaml index 10748097..68f79b43 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1s.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-vc1s.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x2.xlarge.x86.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x2.xlarge.x86.yaml index e7700584..ac623058 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x2.xlarge.x86.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x2.xlarge.x86.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-120gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-120gb.yaml index 8063e03d..3a984578 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-120gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-120gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-15gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-15gb.yaml index 2bd467ce..ad15c98e 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-15gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-15gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-30gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-30gb.yaml index 52dc0d7c..9c6ae146 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-30gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-30gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-60gb.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-60gb.yaml index 3870d556..42f08561 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-60gb.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/packet-x64-60gb.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-115.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-115.yaml index 7052285b..29770ebf 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-115.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-115.yaml @@ -1,4 +1,4 @@ -apiVersion: 
cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-116.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-116.yaml index be454ff0..58354ea8 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-116.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-116.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-117.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-117.yaml index 0a10cd1b..9abef411 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-117.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-117.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-118.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-118.yaml index 49f4a403..68d7cc91 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-118.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-118.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-201.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-201.yaml index 87352f35..ff38df21 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-201.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-201.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-202.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-202.yaml index c99f1404..30cbf38d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-202.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-202.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-203.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-203.yaml index 5a44aed0..912a6f03 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-203.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-203.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-204.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-204.yaml index cd66c827..66b97ddc 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-204.yaml +++ 
b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-204.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-205.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-205.yaml index fbbe38ee..d9c338de 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-205.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-205.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-206.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-206.yaml index 17080306..3858f7fa 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-206.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-206.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-207.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-207.yaml index bdd1040c..e43d74d6 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-207.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-207.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-208.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-208.yaml index 28ad13d7..47a88221 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-208.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-208.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-87.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-87.yaml index 8adc42e5..6b737d5b 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-87.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-87.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-88.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-88.yaml index a390610b..2fced847 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-88.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-88.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-89.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-89.yaml index 963de617..b3f7e9cf 100755 --- 
a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-89.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-89.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-90.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-90.yaml index f3bb89f5..13122b9d 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-90.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-90.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-91.yaml b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-91.yaml index ba869ad4..c266784f 100755 --- a/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-91.yaml +++ b/data/yaml/apis/cloud.bytebuilders.dev/v1alpha1/machinetypes/vultr-91.yaml @@ -1,4 +1,4 @@ -apiVersion: cloud.bytebuilders.dev/v1alpha1 +apiVersion: cloud.appscode.com/v1alpha1 kind: MachineType metadata: creationTimestamp: null diff --git a/go.mod b/go.mod index d04bf25b..a79479af 100644 --- a/go.mod +++ b/go.mod @@ -17,23 +17,23 @@ require ( github.com/scaleway/scaleway-cli v1.10.2-0.20190910153627-9984ffe9b94e github.com/spf13/cobra v1.6.0 github.com/spf13/pflag v1.0.5 - golang.org/x/oauth2 v0.7.0 + golang.org/x/oauth2 v0.13.0 gomodules.xyz/flags v0.1.3 gomodules.xyz/logs v0.0.6 gomodules.xyz/x v0.0.14 - google.golang.org/api v0.104.0 + google.golang.org/api v0.126.0 k8s.io/apimachinery v0.25.4 k8s.io/client-go v0.25.3 k8s.io/klog/v2 v2.80.1 - kmodules.xyz/client-go v0.25.38 + kmodules.xyz/client-go v0.25.44 kmodules.xyz/crd-schema-fuzz v0.25.0 - kmodules.xyz/resource-metadata v0.17.26-0.20231009112556-f90c55e7954d + kmodules.xyz/resource-metadata v0.17.44 sigs.k8s.io/yaml v1.3.0 ) require ( - cloud.google.com/go/compute v1.13.0 // indirect - cloud.google.com/go/compute/metadata v0.2.2 // indirect + cloud.google.com/go/compute v1.21.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 // indirect @@ -41,10 +41,14 @@ require ( github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig/v3 v3.2.2 // indirect github.com/containerd/console v1.0.3 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.12.1 // indirect github.com/creack/goselect v0.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dnaeon/go-vcr v1.2.0 // indirect + github.com/docker/cli v20.10.22+incompatible // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/docker v20.10.21+incompatible // indirect + github.com/docker/docker-credential-helpers v0.7.0 // indirect github.com/dustin/go-humanize v1.0.1-0.20220316001817-d5090ed65664 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect @@ -59,15 +63,16 @@ require ( github.com/go-resty/resty/v2 v2.6.0 // indirect github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect github.com/golang-jwt/jwt 
v3.2.1+incompatible // indirect - github.com/golang-jwt/jwt/v4 v4.4.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.6.9 // indirect github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-containerregistry v0.13.0 // indirect github.com/google/go-querystring v1.1.0 // indirect + github.com/google/s2a-go v0.1.4 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.11.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef // indirect github.com/huandu/xstrings v1.3.2 // indirect @@ -76,17 +81,21 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.15.11 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.16 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/moul/gotty-client v1.10.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.0-rc2 // indirect github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect github.com/renstrom/fuzzysearch v0.0.0-00010101000000-000000000000 // indirect github.com/rogpeppe/go-internal v1.8.0 // indirect @@ -94,14 +103,15 @@ require ( github.com/shopspring/decimal v1.2.0 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/cast v1.5.0 // indirect + github.com/vbatts/tar-split v0.11.2 // indirect github.com/yudai/gojsondiff v1.0.0 // indirect github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.13.0 // indirect - golang.org/x/net v0.15.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.12.0 // indirect - golang.org/x/term v0.12.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sync v0.4.0 // indirect + golang.org/x/sys v0.13.0 // indirect + golang.org/x/term v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.7.0 // indirect @@ -114,9 +124,9 @@ require ( gomodules.xyz/sets v0.2.1 // indirect gomodules.xyz/wait v0.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 // indirect - google.golang.org/grpc v1.51.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/grpc v1.58.3 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect 
gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect @@ -125,14 +135,15 @@ require ( k8s.io/apiextensions-apiserver v0.25.3 // indirect k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 // indirect k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 // indirect + kmodules.xyz/go-containerregistry v0.0.11 // indirect kmodules.xyz/offshoot-api v0.25.0 // indirect - kmodules.xyz/resource-metrics v0.25.3 // indirect + kmodules.xyz/resource-metrics v0.25.7 // indirect moul.io/anonuuid v1.3.2 // indirect moul.io/srand v1.6.1 // indirect sigs.k8s.io/controller-runtime v0.13.1 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - x-helm.dev/apimachinery v0.0.12 // indirect + x-helm.dev/apimachinery v0.0.15 // indirect ) replace ( diff --git a/go.sum b/go.sum index eaf1ddcf..e0982f4c 100644 --- a/go.sum +++ b/go.sum @@ -5,18 +5,17 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU= -cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= -cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k= -cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v67.0.0+incompatible h1:SVBwznSETB0Sipd0uyGJr7khLhJOFRUEUb+0JgkCvDo= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0 h1:sVPhtT2qjO86rTUaWMr4WoES4TkjGnzcioXcnHV9s5k= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0 h1:Yoicul8bnVdQrhDMTHxdEckRGX01XvwXDHUT9zYZ3k0= @@ -58,14 +57,21 @@ github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx2 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/stargz-snapshotter/estargz v0.12.1 h1:+7nYmHJb0tEkcRaAW+MHqoKaJYZmkikupxCqVtmPuY0= +github.com/containerd/stargz-snapshotter/estargz v0.12.1/go.mod h1:12VUuCq3qPq4y8yUW+l5w3+oXV3cx2Po3KSe/SmPGqw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -86,8 +92,14 @@ github.com/digitalocean/godo v1.91.1/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzK github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/cli v20.10.22+incompatible h1:0E7UqWPcn4SlvLImMHyh6xwyNRUGdPxhstpHeh0bFL0= +github.com/docker/cli v20.10.22+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog= github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= +github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v1.0.1-0.20220316001817-d5090ed65664 h1:sVcJ9NcFWMaLxylVWc5iOhkLUkh84wHfOFGzaahu9fc= github.com/dustin/go-humanize v1.0.1-0.20220316001817-d5090ed65664/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= @@ -98,6 +110,7 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m 
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -143,7 +156,6 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= -github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -184,6 +196,8 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k= +github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -194,16 +208,18 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs= -github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod 
h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.11.0 h1:9V9PWXEsWnPpQhu/PeQIkS4eGzMlTLGgt80cUUI8Ki4= +github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -261,6 +277,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/kmodules/k8s-client-go v0.0.0-20221011065734-850c651fe75a h1:FpOgQ+kyKzcDv3H/WzkjWo7ZcN5eE0dT0gza88KzLD8= github.com/kmodules/k8s-client-go v0.0.0-20221011065734-850c651fe75a/go.mod h1:i7cNU7N+yGQmJkewcRD2+Vuj4iz7b30kI8OcL3horQ4= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -298,6 +316,7 @@ github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceT github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= @@ -326,6 +345,10 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk= github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod 
h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/packethost/packngo v0.13.0 h1:VIeDY/Uju53v8LAKxiqTrfR9jkpX5PhWdnQC0h3aUU8= github.com/packethost/packngo v0.13.0/go.mod h1:YrtUNN9IRjjqN6zK+cy2IYoi3EjHfoWTWxJkI1I1Vk0= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -371,6 +394,7 @@ github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXY github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -405,7 +429,10 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.5/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= +github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= @@ -437,8 +464,9 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -480,16 +508,17 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -497,9 +526,10 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -532,13 +562,13 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.12.0 h1:/ZfYdc3zq+q02Rv9vGqTeSItdzZTSNDmfTi0mBAuidU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -546,6 +576,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= @@ -581,7 +612,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f h1:hTyhR4r+tj1Uq7/PpFxLTzbeA0LhMVp7bEYfhkzFjdY= gomodules.xyz/clock v0.0.0-20200817085942-06523dba733f/go.mod h1:K3m7N+nBOlf91/tpv8REUGwsAgaKFwElQCuiLhm12AQ= gomodules.xyz/encoding v0.0.7 h1:Y4PaflVS+vkYgkw6FwyF1S0ab4Y1BAdOqB3Uwjcx8qI= @@ -610,8 +640,8 @@ google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.104.0 h1:KBfmLRqdZEbwQleFlSLnzpQJwhjpmNOk4cKQIBDZ9mg= -google.golang.org/api v0.104.0/go.mod h1:JCspTXJbBxa5ySXw4UgUqVer7DfVxbvc/CTUFqAED5U= +google.golang.org/api v0.126.0 h1:q4GJq+cAdMAC7XP7njvQ4tvohGLiSlytuL4BQxbIZ+o= +google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -629,8 +659,10 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 h1:jmIfw8+gSvXcZSgaFAGyInDXeWzUhvYH57G/5GKMn70= -google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -641,8 +673,9 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -655,8 +688,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -683,6 +716,7 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -701,16 +735,18 @@ k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0 k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -kmodules.xyz/client-go v0.25.38 h1:dNGq7eAVfpeyr7EdIEq3L2WSu+AgVffZlYwyzhye29I= -kmodules.xyz/client-go v0.25.38/go.mod h1:LkGtA7shOM9hLwc7tFUjGrIdGLEa0on5D34LF8NM4Co= +kmodules.xyz/client-go v0.25.44 h1:VqnfvqK7YLl+0sjUGnfXQdq2jQvG8Phkoc4GCk5nKSE= +kmodules.xyz/client-go v0.25.44/go.mod h1:bUfkYVd0Z7NKY87y87JZKYHxP0qk17FQD5zQazNrklQ= kmodules.xyz/crd-schema-fuzz v0.25.0 h1:c5ZxNRqJak1bkGhECmyrKpzKGThFMB4088Kynyvngbc= kmodules.xyz/crd-schema-fuzz v0.25.0/go.mod h1:VigFz19GwCxMGhb3YjCtlSXmfXb0J/g9du1So6rvqsk= +kmodules.xyz/go-containerregistry v0.0.11 h1:eZ7dz5QvszqoedOQLyqf5lkDa+S3Bds4EHHhb6jrLMc= +kmodules.xyz/go-containerregistry v0.0.11/go.mod h1:AUdmeACbvYyX+IsbfeMpVYFQskFsnaltWeRKx0cB0J0= kmodules.xyz/offshoot-api v0.25.0 h1:Svq9da/+sg5afOjpgo9vx2J/Lu90Mo0aFxkdQmgKnGI= kmodules.xyz/offshoot-api v0.25.0/go.mod h1:ysEBn7LJuT3+s8ynAQA/OG0BSsJugXa6KGtDLMRjlKo= -kmodules.xyz/resource-metadata v0.17.26-0.20231009112556-f90c55e7954d h1:E0SkGh/coySTCdes4ZLw9kBT+RaVMjNhzm6mycxhK7g= -kmodules.xyz/resource-metadata v0.17.26-0.20231009112556-f90c55e7954d/go.mod h1:tyLxzAVkhlL3/jFdcQcX1RZ8i8h9bs+AJur5RcuaW40= -kmodules.xyz/resource-metrics v0.25.3 h1:g9EjNfYRrUSnbA4r+bUQefQ5Ban6I6rpKjnB3ER+Yew= -kmodules.xyz/resource-metrics v0.25.3/go.mod h1:H7YLdUQJXUSzf5cNI4IYWU4Wsmrua/jpw7gqDnE3BwM= +kmodules.xyz/resource-metadata v0.17.44 h1:BbsvZxoRZWAgjkHF+VGeO5mo2Lc1BekAvmpiuasdECM= +kmodules.xyz/resource-metadata v0.17.44/go.mod h1:a8MUmKJVYBjlwaEM0wP5z/6KQRGoktSQQ52utvDhpJg= +kmodules.xyz/resource-metrics v0.25.7 h1:ne2cK/dlnfYcuu0+pHX15Y53rcKBQq9DuCtQVcx6rY8= +kmodules.xyz/resource-metrics v0.25.7/go.mod h1:y7pDmTWuVLNGSjwckKCwJFhCgi5fhbwS7PAcH2rmGcY= moul.io/anonuuid v1.3.2 h1:0iPzohbBNJvqVKv2VsqeN8YsDJhJymW/HiytSK2Rf0g= moul.io/anonuuid v1.3.2/go.mod h1:fijmP4WPvGy80Mn0PJVItKXgMt+bcOXHMzItOZu0emI= moul.io/srand v1.6.1 h1:SJ335F+54ivLdlH7wH52Rtyv0Ffos6DpsF5wu3ZVMXU= @@ -724,5 +760,5 @@ sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kF sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 
h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -x-helm.dev/apimachinery v0.0.12 h1:lEuGH5F+6xqL7/uL+SsoOUXeJZFgC8c2OQgtMMj/fQM= -x-helm.dev/apimachinery v0.0.12/go.mod h1:jjkebU6uv/cS/8pQGE8tbgK/rB39yh4ETiAglPbDWZc= +x-helm.dev/apimachinery v0.0.15 h1:+q1bdvVdDC311RRboRcWk0P2aaM5ZyOhxN/H+VJzJTs= +x-helm.dev/apimachinery v0.0.15/go.mod h1:wpYBmu0U/uETX6Kv6TXequ9FPmhI24KquLK2B30wOs8= diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index efedadbe..b1672963 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.13.0" +const Version = "1.21.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 6e3ee8d6..06b95734 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) + + +### Bug Fixes + +* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) + ## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index d4aad9bf..c17faa14 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -147,7 +147,7 @@ func testOnGCE() bool { go func() { resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal") + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") if err != nil || len(addrs) == 0 { resc <- false return diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE similarity index 100% rename from vendor/google.golang.org/genproto/LICENSE rename to vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go new file mode 100644 index 00000000..0da3efe4 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -0,0 +1,662 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. 
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	"github.com/klauspost/compress/zstd"
+	digest "github.com/opencontainers/go-digest"
+	"golang.org/x/sync/errgroup"
+)
+
+type options struct {
+	chunkSize              int
+	compressionLevel       int
+	prioritizedFiles       []string
+	missedPrioritizedFiles *[]string
+	compression            Compression
+	ctx                    context.Context
+}
+
+type Option func(o *options) error
+
+// WithChunkSize option specifies the chunk size of the eStargz blob to build.
+func WithChunkSize(chunkSize int) Option {
+	return func(o *options) error {
+		o.chunkSize = chunkSize
+		return nil
+	}
+}
+
+// WithCompressionLevel option specifies the gzip compression level.
+// The default is gzip.BestCompression.
+// See also: https://godoc.org/compress/gzip#pkg-constants
+func WithCompressionLevel(level int) Option {
+	return func(o *options) error {
+		o.compressionLevel = level
+		return nil
+	}
+}
+
+// WithPrioritizedFiles option specifies the list of prioritized files.
+// These files must be complete paths that are absolute or relative to "/".
+// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
+// are treated as "/foo/bar".
+func WithPrioritizedFiles(files []string) Option {
+	return func(o *options) error {
+		o.prioritizedFiles = files
+		return nil
+	}
+}
+
+// WithAllowPrioritizeNotFound makes Build continue execution even if some of
+// the prioritized files specified by the WithPrioritizedFiles option aren't
+// found in the input tar. Instead, it records all missed file names in the
+// passed slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+	return func(o *options) error {
+		if missedFiles == nil {
+			return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+		}
+		o.missedPrioritizedFiles = missedFiles
+		return nil
+	}
+}
+
+// WithCompression specifies the compression algorithm to be used.
+// The default is gzip.
+func WithCompression(compression Compression) Option {
+	return func(o *options) error {
+		o.compression = compression
+		return nil
+	}
+}
+
+// WithContext specifies a context that can be used for clean cancellation.
+func WithContext(ctx context.Context) Option {
+	return func(o *options) error {
+		o.ctx = ctx
+		return nil
+	}
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+	io.ReadCloser
+	diffID    digest.Digester
+	tocDigest digest.Digest
+}
+
+// DiffID returns the digest of the uncompressed blob.
+// It is only valid to call DiffID after Close.
+func (b *Blob) DiffID() digest.Digest {
+	return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of the uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+	return b.tocDigest
+}
+
+// Build builds an eStargz blob, which is an extended version of stargz, from a blob
+// (gzip, zstd or plain tar) passed through the argument. If prioritized files are
+// listed in the options, these files are grouped as "prioritized" and can be used
+// for runtime optimization (e.g. prefetch). This function builds the blob in
+// parallel, dividing it into several (at least runtime.GOMAXPROCS(0)) sub-blobs.
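A minimal usage sketch of the Build API introduced by this vendored file, under stated assumptions: the input path "layer.tar", the output path, the chunk size, and the prioritized file are all illustrative, not part of this change.

package main

import (
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func buildESGZ() error {
	f, err := os.Open("layer.tar") // hypothetical plain-tar layer
	if err != nil {
		return err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	// Build consumes the tar through a SectionReader and streams back an eStargz blob.
	blob, err := estargz.Build(io.NewSectionReader(f, 0, fi.Size()),
		estargz.WithChunkSize(4<<20), // assumed 4 MiB chunk size
		estargz.WithPrioritizedFiles([]string{"etc/hosts"}))
	if err != nil {
		return err
	}
	defer blob.Close()
	out, err := os.Create("layer.esgz") // hypothetical output path
	if err != nil {
		return err
	}
	defer out.Close()
	if _, err := io.Copy(out, blob); err != nil {
		return err
	}
	_ = blob.TOCDigest() // available now; DiffID is valid only after Close
	return nil
}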
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { + var opts options + opts.compressionLevel = gzip.BestCompression // BestCompression by default + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + if opts.compression == nil { + opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) + } + layerFiles := newTempFiles() + ctx := opts.ctx + if ctx == nil { + ctx = context.Background() + } + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + // nop + case <-ctx.Done(): + layerFiles.CleanupAll() + } + }() + defer func() { + if rErr != nil { + if err := layerFiles.CleanupAll(); err != nil { + rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) + } + } + if cErr := ctx.Err(); cErr != nil { + rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) + } + }() + tarBlob, err := decompressBlob(tarBlob, layerFiles) + if err != nil { + return nil, err + } + entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles) + if err != nil { + return nil, err + } + tarParts := divideEntries(entries, runtime.GOMAXPROCS(0)) + writers := make([]*Writer, len(tarParts)) + payloads := make([]*os.File, len(tarParts)) + var mu sync.Mutex + var eg errgroup.Group + for i, parts := range tarParts { + i, parts := i, parts + // builds verifiable stargz sub-blobs + eg.Go(func() error { + esgzFile, err := layerFiles.TempFile("", "esgzdata") + if err != nil { + return err + } + sw := NewWriterWithCompressor(esgzFile, opts.compression) + sw.ChunkSize = opts.chunkSize + if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { + return err + } + mu.Lock() + writers[i] = sw + payloads[i] = esgzFile + mu.Unlock() + return nil + }) + } + if err := eg.Wait(); err != nil { + rErr = err + return nil, err + } + tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...) + if err != nil { + rErr = err + return nil, err + } + var rs []io.Reader + for _, p := range payloads { + fs, err := fileSectionReader(p) + if err != nil { + return nil, err + } + rs = append(rs, fs) + } + diffID := digest.Canonical.Digester() + pr, pw := io.Pipe() + go func() { + r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw)) + if err != nil { + pw.CloseWithError(err) + return + } + defer r.Close() + if _, err := io.Copy(diffID.Hash(), r); err != nil { + pw.CloseWithError(err) + return + } + pw.Close() + }() + return &Blob{ + ReadCloser: readCloser{ + Reader: pr, + closeFunc: layerFiles.CleanupAll, + }, + tocDigest: tocDgst, + diffID: diffID, + }, nil +} + +// closeWithCombine takes unclosed Writers and close them. This also returns the +// toc that combined all Writers into. +// Writers doesn't write TOC and footer to the underlying writers so they can be +// combined into a single eStargz and tocAndFooter returned by this function can +// be appended at the tail of that combined blob. 
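The Build body above returns its result through an io.Pipe: a goroutine tees the compressed stream into the pipe while a decompressing reader on the tee side feeds the DiffID digester, so the uncompressed digest is computed in the same pass the caller reads the blob. A simplified, stand-alone sketch of this tee-and-digest pattern, assuming only the standard library and go-digest:

package main

import (
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	src := strings.NewReader("example payload")
	dgstr := digest.Canonical.Digester()
	pr, pw := io.Pipe()
	go func() {
		// Every byte copied to pw is also written into the digester's hash.
		_, err := io.Copy(pw, io.TeeReader(src, dgstr.Hash()))
		pw.CloseWithError(err) // CloseWithError(nil) behaves like Close
	}()
	data, err := io.ReadAll(pr) // the consumer sees the full stream
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes, digest %s\n", len(data), dgstr.Digest())
}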
+func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+	if len(ws) == 0 {
+		return nil, "", fmt.Errorf("at least one writer must be passed")
+	}
+	for _, w := range ws {
+		if w.closed {
+			return nil, "", fmt.Errorf("writer must be unclosed")
+		}
+		defer func(w *Writer) { w.closed = true }(w)
+		if err := w.closeGz(); err != nil {
+			return nil, "", err
+		}
+		if err := w.bw.Flush(); err != nil {
+			return nil, "", err
+		}
+	}
+	var (
+		mtoc          = new(JTOC)
+		currentOffset int64
+	)
+	mtoc.Version = ws[0].toc.Version
+	for _, w := range ws {
+		for _, e := range w.toc.Entries {
+			// Recalculate Offset of non-empty files/chunks
+			if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+				e.Offset += currentOffset
+			}
+			mtoc.Entries = append(mtoc.Entries, e)
+		}
+		if w.toc.Version > mtoc.Version {
+			mtoc.Version = w.toc.Version
+		}
+		currentOffset += w.cw.n
+	}
+
+	return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+	buf := new(bytes.Buffer)
+	tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	return buf, tocDigest, nil
+}
+
+// divideEntries divides the passed entries into at least minPartsNum parts,
+// balanced by the estimated payload size of each part.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+	var estimatedSize int64
+	for _, e := range entries {
+		estimatedSize += e.header.Size
+	}
+	unitSize := estimatedSize / int64(minPartsNum)
+	var (
+		nextEnd = unitSize
+		offset  int64
+	)
+	set = append(set, []*entry{})
+	for _, e := range entries {
+		set[len(set)-1] = append(set[len(set)-1], e)
+		offset += e.header.Size
+		if offset > nextEnd {
+			set = append(set, []*entry{})
+			nextEnd += unitSize
+		}
+	}
+	return
+}
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If prioritized files are specified, the list starts with those files,
+// keeping the order given by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+	// Import tar file.
+	intar, err := importTar(in)
+	if err != nil {
+		return nil, fmt.Errorf("failed to sort: %w", err)
+	}
+
+	// Sort the tar file with respect to the prioritized files list.
+	sorted := &tarFile{}
+	for _, l := range prioritized {
+		if err := moveRec(l, intar, sorted); err != nil {
+			if errors.Is(err, errNotFound) && missedPrioritized != nil {
+				*missedPrioritized = append(*missedPrioritized, l)
+				continue // allow not found
+			}
+			return nil, fmt.Errorf("failed to sort tar entries: %w", err)
+		}
+	}
+	if len(prioritized) == 0 {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     NoPrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	} else {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     PrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	}
+
+	// Dump all entries and concatenate them.
+	return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of a tar archive that contains the
+// entries passed through the arguments.
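A stand-alone sketch (assumed demo, not upstream code) of the io.Pipe plus tar.Writer pattern used by readerFromEntries below: the tar stream is produced lazily in a goroutine, so the consumer can read it without buffering the whole archive.

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"strings"
)

func tarStream() io.Reader {
	pr, pw := io.Pipe()
	go func() {
		tw := tar.NewWriter(pw)
		body := "hello"
		hdr := &tar.Header{Name: "greeting.txt", Mode: 0644, Size: int64(len(body)), Typeflag: tar.TypeReg}
		if err := tw.WriteHeader(hdr); err != nil {
			pw.CloseWithError(err)
			return
		}
		if _, err := io.Copy(tw, strings.NewReader(body)); err != nil {
			pw.CloseWithError(err)
			return
		}
		pw.CloseWithError(tw.Close()) // flush the tar footer, then close the pipe
	}()
	return pr
}

func main() {
	tr := tar.NewReader(tarStream())
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println("entry:", hdr.Name)
	}
}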
+func readerFromEntries(entries ...*entry) io.Reader { + pr, pw := io.Pipe() + go func() { + tw := tar.NewWriter(pw) + defer tw.Close() + for _, entry := range entries { + if err := tw.WriteHeader(entry.header); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) + return + } + if _, err := io.Copy(tw, entry.payload); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) + return + } + } + pw.Close() + }() + return pr +} + +func importTar(in io.ReaderAt) (*tarFile, error) { + tf := &tarFile{} + pw, err := newCountReader(in) + if err != nil { + return nil, fmt.Errorf("failed to make position watcher: %w", err) + } + tr := tar.NewReader(pw) + + // Walk through all nodes. + for { + // Fetch and parse next header. + h, err := tr.Next() + if err != nil { + if err == io.EOF { + break + } else { + return nil, fmt.Errorf("failed to parse tar file, %w", err) + } + } + switch cleanEntryName(h.Name) { + case PrefetchLandmark, NoPrefetchLandmark: + // Ignore existing landmark + continue + } + + // Add entry. If it already exists, replace it. + if _, ok := tf.get(h.Name); ok { + tf.remove(h.Name) + } + tf.add(&entry{ + header: h, + payload: io.NewSectionReader(in, pw.currentPos(), h.Size), + }) + } + + return tf, nil +} + +func moveRec(name string, in *tarFile, out *tarFile) error { + name = cleanEntryName(name) + if name == "" { // root directory. stop recursion. + if e, ok := in.get(name); ok { + // entry of the root directory exists. we should move it as well. + // this case will occur if tar entries are prefixed with "./", "/", etc. + out.add(e) + in.remove(name) + } + return nil + } + + _, okIn := in.get(name) + _, okOut := out.get(name) + if !okIn && !okOut { + return fmt.Errorf("file: %q: %w", name, errNotFound) + } + + parent, _ := path.Split(strings.TrimSuffix(name, "/")) + if err := moveRec(parent, in, out); err != nil { + return err + } + if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { + if err := moveRec(e.header.Linkname, in, out); err != nil { + return err + } + } + if e, ok := in.get(name); ok { + out.add(e) + in.remove(name) + } + return nil +} + +type entry struct { + header *tar.Header + payload io.ReadSeeker +} + +type tarFile struct { + index map[string]*entry + stream []*entry +} + +func (f *tarFile) add(e *entry) { + if f.index == nil { + f.index = make(map[string]*entry) + } + f.index[cleanEntryName(e.header.Name)] = e + f.stream = append(f.stream, e) +} + +func (f *tarFile) remove(name string) { + name = cleanEntryName(name) + if f.index != nil { + delete(f.index, name) + } + var filtered []*entry + for _, e := range f.stream { + if cleanEntryName(e.header.Name) == name { + continue + } + filtered = append(filtered, e) + } + f.stream = filtered +} + +func (f *tarFile) get(name string) (e *entry, ok bool) { + if f.index == nil { + return nil, false + } + e, ok = f.index[cleanEntryName(name)] + return +} + +func (f *tarFile) dump() []*entry { + return f.stream +} + +type readCloser struct { + io.Reader + closeFunc func() error +} + +func (rc readCloser) Close() error { + return rc.closeFunc() +} + +func fileSectionReader(file *os.File) (*io.SectionReader, error) { + info, err := file.Stat() + if err != nil { + return nil, err + } + return io.NewSectionReader(file, 0, info.Size()), nil +} + +func newTempFiles() *tempFiles { + return &tempFiles{} +} + +type tempFiles struct { + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once +} + +func (tf *tempFiles) TempFile(dir, pattern string) 
(*os.File, error) { + f, err := os.CreateTemp(dir, pattern) + if err != nil { + return nil, err + } + tf.filesMu.Lock() + tf.files = append(tf.files, f) + tf.filesMu.Unlock() + return f, nil +} + +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { + tf.filesMu.Lock() + defer tf.filesMu.Unlock() + var allErr []error + for _, f := range tf.files { + if err := f.Close(); err != nil { + allErr = append(allErr, err) + } + if err := os.Remove(f.Name()); err != nil { + allErr = append(allErr, err) + } + } + tf.files = nil + return errorutil.Aggregate(allErr) +} + +func newCountReader(r io.ReaderAt) (*countReader, error) { + pos := int64(0) + return &countReader{r: r, cPos: &pos}, nil +} + +type countReader struct { + r io.ReaderAt + cPos *int64 + + mu sync.Mutex +} + +func (cr *countReader) Read(p []byte) (int, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + n, err := cr.r.ReadAt(p, *cr.cPos) + if err == nil { + *cr.cPos += int64(n) + } + return n, err +} + +func (cr *countReader) Seek(offset int64, whence int) (int64, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + switch whence { + default: + return 0, fmt.Errorf("Unknown whence: %v", whence) + case io.SeekStart: + case io.SeekCurrent: + offset += *cr.cPos + case io.SeekEnd: + return 0, fmt.Errorf("Unsupported whence: %v", whence) + } + + if offset < 0 { + return 0, fmt.Errorf("invalid offset") + } + *cr.cPos = offset + return offset, nil +} + +func (cr *countReader) currentPos() int64 { + cr.mu.Lock() + defer cr.mu.Unlock() + + return *cr.cPos +} + +func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { + if org.Size() < 4 { + return org, nil + } + src := make([]byte, 4) + if _, err := org.Read(src); err != nil && err != io.EOF { + return nil, err + } + var dR io.Reader + if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { + // gzip + dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dgR.Close() + dR = io.Reader(dgR) + } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) { + // zstd + dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dzR.Close() + dR = io.Reader(dzR) + } else { + // uncompressed + return io.NewSectionReader(org, 0, org.Size()), nil + } + b, err := tmp.TempFile("", "uncompresseddata") + if err != nil { + return nil, err + } + if _, err := io.Copy(b, dR); err != nil { + return nil, err + } + return fileSectionReader(b) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go new file mode 100644 index 00000000..6de78b02 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package errorutil
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// Aggregate combines a list of errors into a single new error.
+func Aggregate(errs []error) error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		points := make([]string, len(errs)+1)
+		points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
+		for i, err := range errs {
+			points[i+1] = fmt.Sprintf("* %s", err)
+		}
+		return errors.New(strings.Join(points, "\n\t"))
+	}
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
new file mode 100644
index 00000000..921e59ec
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
@@ -0,0 +1,1041 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+/*
+   Copyright 2019 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/vbatts/tar-split/archive/tar"
+)
+
+// A Reader permits random access reads from a stargz file.
+type Reader struct {
+	sr        *io.SectionReader
+	toc       *JTOC
+	tocDigest digest.Digest
+
+	// m stores all non-chunk entries, keyed by name.
+	m map[string]*TOCEntry
+
+	// chunks stores all TOCEntry values for regular files that
+	// are split up. For a file with a single chunk, it's only
+	// stored in m.
+	chunks map[string][]*TOCEntry
+
+	decompressor Decompressor
+}
+
+type openOpts struct {
+	tocOffset     int64
+	decompressors []Decompressor
+	telemetry     *Telemetry
+}
+
+// OpenOption is an option used when opening the layer.
+type OpenOption func(o *openOpts) error
+
+// WithTOCOffset option specifies the offset of the TOC.
+func WithTOCOffset(tocOffset int64) OpenOption {
+	return func(o *openOpts) error {
+		o.tocOffset = tocOffset
+		return nil
+	}
+}
+
+// WithDecompressors option specifies decompressors to use.
+// The default is the gzip-based decompressor.
+func WithDecompressors(decompressors ...Decompressor) OpenOption {
+	return func(o *openOpts) error {
+		o.decompressors = decompressors
+		return nil
+	}
+}
+
+// WithTelemetry option specifies the telemetry hooks.
+func WithTelemetry(telemetry *Telemetry) OpenOption {
+	return func(o *openOpts) error {
+		o.telemetry = telemetry
+		return nil
+	}
+}
+
+// MeasureLatencyHook is a func which takes a start time and records the diff.
+type MeasureLatencyHook func(time.Time)
+
+// Telemetry is a struct which defines telemetry hooks. By implementing these
+// hooks you can record the latency metrics of the respective steps of the
+// estargz open operation. To be used with Open(...) and the WithTelemetry option.
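A usage sketch of the telemetry hooks, wired through Open with the WithTelemetry option defined above; the logging destination is an illustrative assumption.

package main

import (
	"io"
	"log"
	"time"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func openWithMetrics(sr *io.SectionReader) (*estargz.Reader, error) {
	tel := &estargz.Telemetry{
		GetFooterLatency:      func(start time.Time) { log.Printf("footer fetch took %v", time.Since(start)) },
		GetTocLatency:         func(start time.Time) { log.Printf("TOC fetch took %v", time.Since(start)) },
		DeserializeTocLatency: func(start time.Time) { log.Printf("TOC decode took %v", time.Since(start)) },
	}
	return estargz.Open(sr, estargz.WithTelemetry(tel))
}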
+type Telemetry struct { + GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds) + GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds) + DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds) +} + +// Open opens a stargz file for reading. +// The behavior is configurable using options. +// +// Note that each entry name is normalized as the path that is relative to root. +func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { + var opts openOpts + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + + gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} + decompressors := append(gzipCompressors, opts.decompressors...) + + // Determine the size to fetch. Try to fetch as many bytes as possible. + fetchSize := maxFooterSize(sr.Size(), decompressors...) + if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize { + if maybeTocOffset > sr.Size() { + return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size()) + } + fetchSize = sr.Size() - maybeTocOffset + } + + start := time.Now() // before getting layer footer + footer := make([]byte, fetchSize) + if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil { + return nil, fmt.Errorf("error reading footer: %v", err) + } + if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil { + opts.telemetry.GetFooterLatency(start) + } + + var allErr []error + var found bool + var r *Reader + for _, d := range decompressors { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + maybeTocBytes := footer[:fOffset] + _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:]) + if err != nil { + allErr = append(allErr, err) + continue + } + if tocSize <= 0 { + tocSize = sr.Size() - tocOffset - fSize + } + if tocSize < int64(len(maybeTocBytes)) { + maybeTocBytes = maybeTocBytes[:tocSize] + } + r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) + if err == nil { + found = true + break + } + allErr = append(allErr, err) + } + if !found { + return nil, errorutil.Aggregate(allErr) + } + if err := r.initFields(); err != nil { + return nil, fmt.Errorf("failed to initialize fields of entries: %v", err) + } + return r, nil +} + +// OpenFooter extracts and parses footer from the given blob. +// only supports gzip-based eStargz. +func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) { + if sr.Size() < FooterSize && sr.Size() < legacyFooterSize { + return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size()) + } + var footer [FooterSize]byte + if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil { + return 0, 0, fmt.Errorf("error reading footer: %v", err) + } + var allErr []error + for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + _, tocOffset, _, err := d.ParseFooter(footer[fOffset:]) + if err == nil { + return tocOffset, fSize, err + } + allErr = append(allErr, err) + } + return 0, 0, errorutil.Aggregate(allErr) +} + +// initFields populates the Reader from r.toc after decoding it from +// JSON. +// +// Unexported fields are populated and TOCEntry fields that were +// implicit in the JSON are populated. 
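Putting Open (above) to work: a sketch, assuming a seekable eStargz blob on disk, that opens a layer and reads one file through the TOC. The paths are illustrative.

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containerd/stargz-snapshotter/estargz"
)

func readOneFile(path, name string) error {
	f, err := os.Open(path) // hypothetical eStargz blob on disk
	if err != nil {
		return err
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		return err
	}
	r, err := estargz.Open(io.NewSectionReader(f, 0, fi.Size()))
	if err != nil {
		return err
	}
	fr, err := r.OpenFile(name) // name is relative to root, e.g. "etc/hosts"
	if err != nil {
		return err
	}
	data, err := io.ReadAll(io.NewSectionReader(fr, 0, fr.Size()))
	if err != nil {
		return err
	}
	fmt.Printf("%s: %d bytes\n", name, len(data))
	return nil
}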
+func (r *Reader) initFields() error {
+	r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
+	r.chunks = make(map[string][]*TOCEntry)
+	var lastPath string
+	uname := map[int]string{}
+	gname := map[int]string{}
+	var lastRegEnt *TOCEntry
+	for _, ent := range r.toc.Entries {
+		ent.Name = cleanEntryName(ent.Name)
+		if ent.Type == "reg" {
+			lastRegEnt = ent
+		}
+		if ent.Type == "chunk" {
+			ent.Name = lastPath
+			r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+			if ent.ChunkSize == 0 && lastRegEnt != nil {
+				ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
+			}
+		} else {
+			lastPath = ent.Name
+
+			if ent.Uname != "" {
+				uname[ent.UID] = ent.Uname
+			} else {
+				ent.Uname = uname[ent.UID]
+			}
+			if ent.Gname != "" {
+				gname[ent.GID] = ent.Gname
+			} else {
+				ent.Gname = gname[ent.GID]
+			}
+
+			ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
+
+			if ent.Type == "dir" {
+				ent.NumLink++ // Parent dir links to this directory
+			}
+			r.m[ent.Name] = ent
+		}
+		if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
+			r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
+			r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+		}
+		if ent.ChunkSize == 0 && ent.Size != 0 {
+			ent.ChunkSize = ent.Size
+		}
+	}
+
+	// Populate children, add implicit directories:
+	for _, ent := range r.toc.Entries {
+		if ent.Type == "chunk" {
+			continue
+		}
+		// add "foo/":
+		//   add "foo" child to "" (creating "" if necessary)
+		//
+		// add "foo/bar/":
+		//   add "bar" child to "foo" (creating "foo" if necessary)
+		//
+		// add "foo/bar.txt":
+		//   add "bar.txt" child to "foo" (creating "foo" if necessary)
+		//
+		// add "a/b/c/d/e/f.txt":
+		//   create "a/b/c/d/e" node
+		//   add "f.txt" child to "e"
+
+		name := ent.Name
+		pdirName := parentDir(name)
+		if name == pdirName {
+			// This entry and its parent are the same.
+			// Ignore this to avoid an infinite reference loop.
+			// The example case where this can occur is when the tar contains the
+			// root directory itself (e.g. "./", "/").
+			continue
+		}
+		pdir := r.getOrCreateDir(pdirName)
+		ent.NumLink++ // at least one name (ent.Name) references this entry.
+		if ent.Type == "hardlink" {
+			org, err := r.getSource(ent)
+			if err != nil {
+				return err
+			}
+			org.NumLink++ // the original entry is referenced by this ent.Name.
+			ent = org
+		}
+		pdir.addChild(path.Base(name), ent)
+	}
+
+	lastOffset := r.sr.Size()
+	for i := len(r.toc.Entries) - 1; i >= 0; i-- {
+		e := r.toc.Entries[i]
+		if e.isDataType() {
+			e.nextOffset = lastOffset
+		}
+		if e.Offset != 0 {
+			lastOffset = e.Offset
+		}
+	}
+
+	return nil
+}
+
+func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) {
+	if ent.Type == "hardlink" {
+		org, ok := r.m[cleanEntryName(ent.LinkName)]
+		if !ok {
+			return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+		}
+		ent, err = r.getSource(org)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return ent, nil
+}
+
+func parentDir(p string) string {
+	dir, _ := path.Split(p)
+	return strings.TrimSuffix(dir, "/")
+}
+
+func (r *Reader) getOrCreateDir(d string) *TOCEntry {
+	e, ok := r.m[d]
+	if !ok {
+		e = &TOCEntry{
+			Name:    d,
+			Type:    "dir",
+			Mode:    0755,
+			NumLink: 2, // The directory itself (.) and the parent link to this directory.
+		}
+		r.m[d] = e
+		if d != "" {
+			pdir := r.getOrCreateDir(parentDir(d))
+			pdir.addChild(path.Base(d), e)
+		}
+	}
+	return e
+}
+
+func (r *Reader) TOCDigest() digest.Digest {
+	return r.tocDigest
+}
+
+// VerifyTOC checks that the TOC JSON in the passed blob matches the
+// passed digests and that the TOC JSON contains digests for all chunks
+// contained in the blob. If the verification succeeds, this function
+// returns TOCEntryVerifier which holds all chunk digests in the stargz blob.
+func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) {
+	// Verify the digest of TOC JSON
+	if r.tocDigest != tocDigest {
+		return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest)
+	}
+	return r.Verifiers()
+}
+
+// Verifiers returns a TOCEntryVerifier for this stargz blob. Use VerifyTOC instead in most
+// cases because this doesn't verify the TOC itself.
+func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
+	chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest
+	regDigestMap := make(map[int64]digest.Digest)   // map from chunk offset to the reg file digest
+	var chunkDigestMapIncomplete bool
+	var regDigestMapIncomplete bool
+	var containsChunk bool
+	for _, e := range r.toc.Entries {
+		if e.Type != "reg" && e.Type != "chunk" {
+			continue
+		}
+
+		// offset must be unique in stargz blob
+		_, dOK := chunkDigestMap[e.Offset]
+		_, rOK := regDigestMap[e.Offset]
+		if dOK || rOK {
+			return nil, fmt.Errorf("offset %d found twice", e.Offset)
+		}
+
+		if e.Type == "reg" {
+			if e.Size == 0 {
+				continue // ignores empty file
+			}
+
+			// record the digest of the regular file payload
+			if e.Digest != "" {
+				d, err := digest.Parse(e.Digest)
+				if err != nil {
+					return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
+				}
+				regDigestMap[e.Offset] = d
+			} else {
+				regDigestMapIncomplete = true
+			}
+		} else {
+			containsChunk = true // this layer contains "chunk" entries.
+		}
+
+		// "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of
+		// chunked file)
+		if e.ChunkDigest != "" {
+			d, err := digest.Parse(e.ChunkDigest)
+			if err != nil {
+				return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
+			}
+			chunkDigestMap[e.Offset] = d
+		} else {
+			chunkDigestMapIncomplete = true
+		}
+	}
+
+	if chunkDigestMapIncomplete {
+		// Though some chunk digests are not found, if this layer doesn't contain
+		// "chunk"s and all digests of "reg" files are recorded, we can use them instead.
+		if !containsChunk && !regDigestMapIncomplete {
+			return &verifier{digestMap: regDigestMap}, nil
+		}
+		return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON")
+	}
+
+	return &verifier{digestMap: chunkDigestMap}, nil
+}
+
+// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
+// offset of the chunk.
+type verifier struct {
+	digestMap   map[int64]digest.Digest
+	digestMapMu sync.Mutex
+}
+
+// Verifier returns a content verifier specified by TOCEntry.
+func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) {
+	v.digestMapMu.Lock()
+	defer v.digestMapMu.Unlock()
+	d, ok := v.digestMap[ce.Offset]
+	if !ok {
+		return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
+			ce.Offset, ce.ChunkSize)
+	}
+	return d.Verifier(), nil
+}
+
+// ChunkEntryForOffset returns the TOCEntry containing the byte of the
+// named file at the given offset within the file.
+// Name must be an absolute path or one that is relative to root.
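+//
+// A hedged sketch of how this pairs with the verifier above (editorial
+// addition; wantTOCDigest and chunkPayloadReader are hypothetical — the
+// latter must yield the uncompressed chunk bytes):
+//
+//	ev, err := r.VerifyTOC(wantTOCDigest) // digest.Digest of the TOC JSON
+//	if err != nil {
+//		// TOC JSON digest mismatch
+//	}
+//	ce, _ := r.ChunkEntryForOffset("foo.txt", 0)
+//	v, _ := ev.Verifier(ce)                               // digest.Verifier for that chunk
+//	_, _ = io.CopyN(v, chunkPayloadReader, ce.ChunkSize)  // feed the chunk bytes
+//	ok := v.Verified()                                    // true iff the chunk is intact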
+func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) { + name = cleanEntryName(name) + e, ok = r.Lookup(name) + if !ok || !e.isDataType() { + return nil, false + } + ents := r.chunks[name] + if len(ents) < 2 { + if offset >= e.ChunkSize { + return nil, false + } + return e, true + } + i := sort.Search(len(ents), func(i int) bool { + e := ents[i] + return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize) + }) + if i == len(ents) { + return nil, false + } + return ents[i], true +} + +// Lookup returns the Table of Contents entry for the given path. +// +// To get the root directory, use the empty string. +// Path must be absolute path or one that is relative to root. +func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { + path = cleanEntryName(path) + if r == nil { + return + } + e, ok = r.m[path] + if ok && e.Type == "hardlink" { + var err error + e, err = r.getSource(e) + if err != nil { + return nil, false + } + } + return +} + +// OpenFile returns the reader of the specified file payload. +// +// Name must be absolute path or one that is relative to root. +func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + name = cleanEntryName(name) + ent, ok := r.Lookup(name) + if !ok { + // TODO: come up with some error plan. This is lazy: + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: os.ErrNotExist, + } + } + if ent.Type != "reg" { + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: errors.New("not a regular file"), + } + } + fr := &fileReader{ + r: r, + size: ent.Size, + ents: r.getChunks(ent), + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { + if ents, ok := r.chunks[ent.Name]; ok { + return ents + } + return []*TOCEntry{ent} +} + +type fileReader struct { + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries +} + +func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { + if off >= fr.size { + return 0, io.EOF + } + if off < 0 { + return 0, errors.New("invalid offset") + } + var i int + if len(fr.ents) > 1 { + i = sort.Search(len(fr.ents), func(i int) bool { + return fr.ents[i].ChunkOffset >= off + }) + if i == len(fr.ents) { + i = len(fr.ents) - 1 + } + } + ent := fr.ents[i] + if ent.ChunkOffset > off { + if i == 0 { + return 0, errors.New("internal error; first chunk offset is non-zero") + } + ent = fr.ents[i-1] + } + + // If ent is a chunk of a large file, adjust the ReadAt + // offset by the chunk's offset. + off -= ent.ChunkOffset + + finalEnt := fr.ents[len(fr.ents)-1] + compressedOff := ent.Offset + // compressedBytesRemain is the number of compressed bytes in this + // file remaining, over 1+ chunks. 
+ compressedBytesRemain := finalEnt.NextOffset() - compressedOff + + sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain) + + const maxRead = 2 << 20 + var bufSize = maxRead + if compressedBytesRemain < maxRead { + bufSize = int(compressedBytesRemain) + } + + br := bufio.NewReaderSize(sr, bufSize) + if _, err := br.Peek(bufSize); err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err) + } + + dr, err := fr.r.decompressor.Reader(br) + if err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) + } + defer dr.Close() + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) + } + return io.ReadFull(dr, p) +} + +// A Writer writes stargz files. +// +// Use NewWriter to create a new Writer. +type Writer struct { + bw *bufio.Writer + cw *countWriter + toc *JTOC + diffHash hash.Hash // SHA-256 of uncompressed tar + + closed bool + gz io.WriteCloser + lastUsername map[int]string + lastGroupname map[int]string + compressor Compressor + + // ChunkSize optionally controls the maximum number of bytes + // of data of a regular file that can be written in one gzip + // stream before a new gzip stream is started. + // Zero means to use a default, currently 4 MiB. + ChunkSize int +} + +// currentCompressionWriter writes to the current w.gz field, which can +// change throughout writing a tar entry. +// +// Additionally, it updates w's SHA-256 of the uncompressed bytes +// of the tar file. +type currentCompressionWriter struct{ w *Writer } + +func (ccw currentCompressionWriter) Write(p []byte) (int, error) { + ccw.w.diffHash.Write(p) + if ccw.w.gz == nil { + if err := ccw.w.condOpenGz(); err != nil { + return 0, err + } + } + return ccw.w.gz.Write(p) +} + +func (w *Writer) chunkSize() int { + if w.ChunkSize <= 0 { + return 4 << 20 + } + return w.ChunkSize +} + +// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob. +// TOC JSON and footer are removed. +func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { + footerSize := c.FooterSize() + if sr.Size() < footerSize { + return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize) + } + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + return nil, err + } + blobPayloadSize, _, _, err := c.ParseFooter(footer) + if err != nil { + return nil, fmt.Errorf("failed to parse footer: %w", err) + } + return c.Reader(io.LimitReader(sr, blobPayloadSize)) +} + +// NewWriter returns a new stargz writer (gzip-based) writing to w. +// +// The writer must be closed to write its trailing table of contents. +func NewWriter(w io.Writer) *Writer { + return NewWriterLevel(w, gzip.BestCompression) +} + +// NewWriterLevel returns a new stargz writer (gzip-based) writing to w. +// The compression level is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterLevel(w io.Writer, compressionLevel int) *Writer { + return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel)) +} + +// NewWriterWithCompressor returns a new stargz writer writing to w. +// The compression method is configurable. +// +// The writer must be closed to write its trailing table of contents. 
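+//
+// For reference, an editorial sketch of the whole write path (tarReader is a
+// hypothetical io.Reader positioned at the start of a tar stream):
+//
+//	var buf bytes.Buffer
+//	w := estargz.NewWriter(&buf) // gzip.BestCompression by default
+//	w.ChunkSize = 4 << 20        // optional; zero selects the same 4 MiB default
+//	if err := w.AppendTar(tarReader); err != nil {
+//		// handle error
+//	}
+//	tocDigest, err := w.Close() // writes the trailing TOC and footer
+//	diffID := w.DiffID()        // uncompressed-tar digest; valid only after Close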
+func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer {
+	bw := bufio.NewWriter(w)
+	cw := &countWriter{w: bw}
+	return &Writer{
+		bw:         bw,
+		cw:         cw,
+		toc:        &JTOC{Version: 1},
+		diffHash:   sha256.New(),
+		compressor: c,
+	}
+}
+
+// Close writes the stargz's table of contents and flushes all the
+// buffers, returning any error.
+func (w *Writer) Close() (digest.Digest, error) {
+	if w.closed {
+		return "", nil
+	}
+	defer func() { w.closed = true }()
+
+	if err := w.closeGz(); err != nil {
+		return "", err
+	}
+
+	// Write the TOC index and footer.
+	tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash)
+	if err != nil {
+		return "", err
+	}
+	if err := w.bw.Flush(); err != nil {
+		return "", err
+	}
+
+	return tocDigest, nil
+}
+
+func (w *Writer) closeGz() error {
+	if w.closed {
+		return errors.New("write on closed Writer")
+	}
+	if w.gz != nil {
+		if err := w.gz.Close(); err != nil {
+			return err
+		}
+		w.gz = nil
+	}
+	return nil
+}
+
+// nameIfChanged returns name, unless it was already the value of (*mp)[id],
+// in which case it returns the empty string.
+func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
+	if name == "" {
+		return ""
+	}
+	if *mp == nil {
+		*mp = make(map[int]string)
+	}
+	if (*mp)[id] == name {
+		return ""
+	}
+	(*mp)[id] = name
+	return name
+}
+
+func (w *Writer) condOpenGz() (err error) {
+	if w.gz == nil {
+		w.gz, err = w.compressor.Writer(w.cw)
+	}
+	return
+}
+
+// AppendTar reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+func (w *Writer) AppendTar(r io.Reader) error {
+	return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference from AppendTar is that this writes
+// the input tar stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns an
+// error because w cannot overwrite the TOC JSON with the one generated by w without
+// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz,
+// you should decompress it and remove the TOC JSON in advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+	return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+	var src io.Reader
+	br := bufio.NewReader(r)
+	if isGzip(br) {
+		zr, _ := gzip.NewReader(br)
+		src = zr
+	} else {
+		src = io.Reader(br)
+	}
+	dst := currentCompressionWriter{w}
+	var tw *tar.Writer
+	if !lossless {
+		tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+	}
+	tr := tar.NewReader(src)
+	if lossless {
+		tr.RawAccounting = true
+	}
+	for {
+		h, err := tr.Next()
+		if err == io.EOF {
+			if lossless {
+				if remain := tr.RawBytes(); len(remain) > 0 {
+					// Collect the remaining null bytes.
+ // https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53 + if _, err := dst.Write(remain); err != nil { + return err + } + } + } + break + } + if err != nil { + return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err) + } + if cleanEntryName(h.Name) == TOCTarName { + // It is possible for a layer to be "stargzified" twice during the + // distribution lifecycle. So we reserve "TOCTarName" here to avoid + // duplicated entries in the resulting layer. + if lossless { + // We cannot handle this in lossless way. + return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append") + } + continue + } + + xattrs := make(map[string][]byte) + const xattrPAXRecordsPrefix = "SCHILY.xattr." + if h.PAXRecords != nil { + for k, v := range h.PAXRecords { + if strings.HasPrefix(k, xattrPAXRecordsPrefix) { + xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v) + } + } + } + ent := &TOCEntry{ + Name: h.Name, + Mode: h.Mode, + UID: h.Uid, + GID: h.Gid, + Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname), + Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname), + ModTime3339: formatModtime(h.ModTime), + Xattrs: xattrs, + } + if err := w.condOpenGz(); err != nil { + return err + } + if tw != nil { + if err := tw.WriteHeader(h); err != nil { + return err + } + } else { + if _, err := dst.Write(tr.RawBytes()); err != nil { + return err + } + } + switch h.Typeflag { + case tar.TypeLink: + ent.Type = "hardlink" + ent.LinkName = h.Linkname + case tar.TypeSymlink: + ent.Type = "symlink" + ent.LinkName = h.Linkname + case tar.TypeDir: + ent.Type = "dir" + case tar.TypeReg: + ent.Type = "reg" + ent.Size = h.Size + case tar.TypeChar: + ent.Type = "char" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeBlock: + ent.Type = "block" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeFifo: + ent.Type = "fifo" + default: + return fmt.Errorf("unsupported input tar entry %q", h.Typeflag) + } + + // We need to keep a reference to the TOC entry for regular files, so that we + // can fill the digest later. 
+ var regFileEntry *TOCEntry + var payloadDigest digest.Digester + if h.Typeflag == tar.TypeReg { + regFileEntry = ent + payloadDigest = digest.Canonical.Digester() + } + + if h.Typeflag == tar.TypeReg && ent.Size > 0 { + var written int64 + totalSize := ent.Size // save it before we destroy ent + tee := io.TeeReader(tr, payloadDigest.Hash()) + for written < totalSize { + if err := w.closeGz(); err != nil { + return err + } + + chunkSize := int64(w.chunkSize()) + remain := totalSize - written + if remain < chunkSize { + chunkSize = remain + } else { + ent.ChunkSize = chunkSize + } + ent.Offset = w.cw.n + ent.ChunkOffset = written + chunkDigest := digest.Canonical.Digester() + + if err := w.condOpenGz(); err != nil { + return err + } + + teeChunk := io.TeeReader(tee, chunkDigest.Hash()) + var out io.Writer + if tw != nil { + out = tw + } else { + out = dst + } + if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil { + return fmt.Errorf("error copying %q: %v", h.Name, err) + } + ent.ChunkDigest = chunkDigest.Digest().String() + w.toc.Entries = append(w.toc.Entries, ent) + written += chunkSize + ent = &TOCEntry{ + Name: h.Name, + Type: "chunk", + } + } + } else { + w.toc.Entries = append(w.toc.Entries, ent) + } + if payloadDigest != nil { + regFileEntry.Digest = payloadDigest.Digest().String() + } + if tw != nil { + if err := tw.Flush(); err != nil { + return err + } + } + } + remainDest := io.Discard + if lossless { + remainDest = dst // Preserve the remaining bytes in lossless mode + } + _, err := io.Copy(remainDest, src) + return err +} + +// DiffID returns the SHA-256 of the uncompressed tar bytes. +// It is only valid to call DiffID after Close. +func (w *Writer) DiffID() string { + return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil)) +} + +func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) { + for _, d := range decompressors { + if s := d.FooterSize(); res < s && s <= blobSize { + res = s + } + } + return +} + +func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) { + if len(tocBytes) > 0 { + start := time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err == nil { + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + } + + start := time.Now() + tocBytes = make([]byte, tocSize) + if _, err := sr.ReadAt(tocBytes, tocOff); err != nil { + return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err) + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + start = time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil +} + +func formatModtime(t time.Time) string { + if t.IsZero() || t.Unix() == 0 { + return "" + } + return t.UTC().Round(time.Second).Format(time.RFC3339) +} + +func cleanEntryName(name string) string { + // Use path.Clean to consistently deal with path separators across platforms. + return strings.TrimPrefix(path.Clean("/"+name), "/") +} + +// countWriter counts how many bytes have been written to its wrapped +// io.Writer. 
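+//
+// Editorial note on cleanEntryName above: because names are cleaned relative
+// to root, the following spellings all map to the same TOC key (a sketch of
+// the expected results, assuming path.Clean semantics):
+//
+//	cleanEntryName("./foo/bar")  == "foo/bar"
+//	cleanEntryName("/foo//bar/") == "foo/bar"
+//	cleanEntryName("../foo")     == "foo"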
+type countWriter struct { + w io.Writer + n int64 +} + +func (cw *countWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + return +} + +// isGzip reports whether br is positioned right before an upcoming gzip stream. +// It does not consume any bytes from br. +func isGzip(br *bufio.Reader) bool { + const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + ) + peek, _ := br.Peek(3) + return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate +} + +func positive(n int64) int64 { + if n < 0 { + return 0 + } + return n +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go new file mode 100644 index 00000000..591d7a62 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -0,0 +1,237 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + + digest "github.com/opencontainers/go-digest" +) + +type gzipCompression struct { + *GzipCompressor + *GzipDecompressor +} + +func newGzipCompressionWithLevel(level int) Compression { + return &gzipCompression{ + &GzipCompressor{level}, + &GzipDecompressor{}, + } +} + +func NewGzipCompressor() *GzipCompressor { + return &GzipCompressor{gzip.BestCompression} +} + +func NewGzipCompressorWithLevel(level int) *GzipCompressor { + return &GzipCompressor{level} +} + +type GzipCompressor struct { + compressionLevel int +} + +func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { + return gzip.NewWriterLevel(w, gc.compressionLevel) +} + +func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) { + tocJSON, err := json.MarshalIndent(toc, "", "\t") + if err != nil { + return "", err + } + gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel) + gw := io.Writer(gz) + if diffHash != nil { + gw = io.MultiWriter(gz, diffHash) + } + tw := tar.NewWriter(gw) + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: TOCTarName, + Size: int64(len(tocJSON)), + }); err != nil { + return "", err + } + if _, err := tw.Write(tocJSON); err != nil { + return "", err + } + + if err := tw.Close(); err != nil { + return "", err + } + if err := gz.Close(); err != nil { + return "", err + } + if _, err := w.Write(gzipFooterBytes(off)); err != nil { + return "", err + } + return digest.FromBytes(tocJSON), nil +} + +// gzipFooterBytes returns the 51 bytes footer. 
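+//
+// The gzip extra field written below has, per RFC 1952 and the ParseFooter
+// implementation that follows, this layout (editorial summary):
+//
+//	'S' 'G' <uint16 little-endian subfield length> "%016x" TOC offset (hex) "STARGZ"
+//
+// so a reader can recover the TOC offset with, for example:
+//
+//	off, err := strconv.ParseInt(string(extra[4:20]), 16, 64)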
+func gzipFooterBytes(tocOff int64) []byte {
+	buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
+
+	// Extra header indicating the offset of TOCJSON
+	// https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+	header := make([]byte, 4)
+	header[0], header[1] = 'S', 'G'
+	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
+	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+	gz.Header.Extra = append(header, []byte(subfield)...)
+	gz.Close()
+	if buf.Len() != FooterSize {
+		panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+	}
+	return buf.Bytes()
+}
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != FooterSize {
+		return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, err
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+	if si1 != 'S' || si2 != 'G' {
+		return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2)
+	}
+	if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+		return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+	}
+	if string(subfield[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+	}
+	tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("failed to parse toc offset: %w", err)
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+	return FooterSize
+}
+
+func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) {
+	return decompressTOCEStargz(r)
+}
+
+type LegacyGzipDecompressor struct{}
+
+func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+	return gzip.NewReader(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+	return parseTOCEStargz(r)
+}
+
+func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+	if len(p) != legacyFooterSize {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+	}
+	zr, err := gzip.NewReader(bytes.NewReader(p))
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
+	}
+	defer zr.Close()
+	extra := zr.Header.Extra
+	if len(extra) != 16+len("STARGZ") {
+		return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+	}
+	if string(extra[16:]) != "STARGZ" {
+		return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+	}
+	tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+	if err != nil {
+		return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
+	}
+	return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *LegacyGzipDecompressor) FooterSize() int64 {
+	return legacyFooterSize
+}
+
+func (gz
*LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) { + return decompressTOCEStargz(r) +} + +func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { + tr, err := decompressTOCEStargz(r) + if err != nil { + return nil, "", err + } + dgstr := digest.Canonical.Digester() + toc = new(JTOC) + if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil { + return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err) + } + if err := tr.Close(); err != nil { + return nil, "", err + } + return toc, dgstr.Digest(), nil +} + +func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) { + zr, err := gzip.NewReader(r) + if err != nil { + return nil, fmt.Errorf("malformed TOC gzip header: %v", err) + } + zr.Multistream(false) + tr := tar.NewReader(zr) + h, err := tr.Next() + if err != nil { + return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err) + } + if h.Name != TOCTarName { + return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName) + } + return readCloser{tr, zr.Close}, nil +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go new file mode 100644 index 00000000..37448cae --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -0,0 +1,2075 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + "github.com/klauspost/compress/zstd" + digest "github.com/opencontainers/go-digest" +) + +// TestingController is Compression with some helper methods necessary for testing. +type TestingController interface { + Compression + CountStreams(*testing.T, []byte) int + DiffIDOf(*testing.T, []byte) string + String() string +} + +// CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. +func CompressionTestSuite(t *testing.T, controllers ...TestingController) { + t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) + t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) + t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) 
}) +} + +const ( + uncompressedType int = iota + gzipType + zstdType +) + +var srcCompressions = []int{ + uncompressedType, + gzipType, + zstdType, +} + +var allowedPrefix = [4]string{"", "./", "/", "../"} + +// testBuild tests the resulting stargz blob built by this pkg has the same +// contents as the normal stargz blob. +func testBuild(t *testing.T, controllers ...TestingController) { + tests := []struct { + name string + chunkSize int + in []tarEntry + }{ + { + name: "regfiles and directories", + chunkSize: 4, + in: tarOf( + file("foo", "test1"), + dir("foo2/"), + file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})), + ), + }, + { + name: "empty files", + chunkSize: 4, + in: tarOf( + file("foo", "tttttt"), + file("foo_empty", ""), + file("foo2", "tttttt"), + file("foo_empty2", ""), + file("foo3", "tttttt"), + file("foo_empty3", ""), + file("foo4", "tttttt"), + file("foo_empty4", ""), + file("foo5", "tttttt"), + file("foo_empty5", ""), + file("foo6", "tttttt"), + ), + }, + { + name: "various files", + chunkSize: 4, + in: tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo.txt", "a"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + { + name: "no contents", + chunkSize: 4, + in: tarOf( + file("baz.txt", ""), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + } + for _, tt := range tests { + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, cl := range controllers { + cl := cl + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + for _, prefix := range allowedPrefix { + prefix := prefix + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) { + tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) + // Test divideEntries() + entries, err := sortEntries(tarBlob, nil, nil) // identical order + if err != nil { + t.Fatalf("failed to parse tar: %v", err) + } + var merged []*entry + for _, part := range divideEntries(entries, 4) { + merged = append(merged, part...) 
+ } + if !reflect.DeepEqual(entries, merged) { + for _, e := range entries { + t.Logf("Original: %v", e.header) + } + for _, e := range merged { + t.Logf("Merged: %v", e.header) + } + t.Errorf("divided entries couldn't be merged") + return + } + + // Prepare sample data + wantBuf := new(bytes.Buffer) + sw := NewWriterWithCompressor(wantBuf, cl) + sw.ChunkSize = tt.chunkSize + if err := sw.AppendTar(tarBlob); err != nil { + t.Fatalf("failed to append tar to want stargz: %v", err) + } + if _, err := sw.Close(); err != nil { + t.Fatalf("failed to prepare want stargz: %v", err) + } + wantData := wantBuf.Bytes() + want, err := Open(io.NewSectionReader( + bytes.NewReader(wantData), 0, int64(len(wantData))), + WithDecompressors(cl), + ) + if err != nil { + t.Fatalf("failed to parse the want stargz: %v", err) + } + + // Prepare testing data + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(tt.chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to build stargz: %v", err) + } + defer rc.Close() + gotBuf := new(bytes.Buffer) + if _, err := io.Copy(gotBuf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + gotData := gotBuf.Bytes() + got, err := Open(io.NewSectionReader( + bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), + WithDecompressors(cl), + ) + if err != nil { + t.Fatalf("failed to parse the got stargz: %v", err) + } + + // Check DiffID is properly calculated + rc.Close() + diffID := rc.DiffID() + wantDiffID := cl.DiffIDOf(t, gotData) + if diffID.String() != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } + + // Compare as stargz + if !isSameVersion(t, cl, wantData, gotData) { + t.Errorf("built stargz hasn't same json") + return + } + if !isSameEntries(t, want, got) { + t.Errorf("built stargz isn't same as the original") + return + } + + // Compare as tar.gz + if !isSameTarGz(t, cl, wantData, gotData) { + t.Errorf("built stargz isn't same tar.gz") + return + } + }) + } + } + } + } + } +} + +func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { + aGz, err := controller.Reader(bytes.NewReader(a)) + if err != nil { + t.Fatalf("failed to read A") + } + defer aGz.Close() + bGz, err := controller.Reader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("failed to read B") + } + defer bGz.Close() + + // Same as tar's Next() method but ignores landmarks and TOCJSON file + next := func(r *tar.Reader) (h *tar.Header, err error) { + for { + if h, err = r.Next(); err != nil { + return + } + if h.Name != PrefetchLandmark && + h.Name != NoPrefetchLandmark && + h.Name != TOCTarName { + return + } + } + } + + aTar := tar.NewReader(aGz) + bTar := tar.NewReader(bGz) + for { + // Fetch and parse next header. 
+		aH, aErr := next(aTar)
+		bH, bErr := next(bTar)
+		if aErr != nil || bErr != nil {
+			if aErr == io.EOF && bErr == io.EOF {
+				break
+			}
+			t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr)
+		}
+		if !reflect.DeepEqual(aH, bH) {
+			t.Logf("different header (A = %v; B = %v)", aH, bH)
+			return false
+		}
+		aFile, err := io.ReadAll(aTar)
+		if err != nil {
+			t.Fatal("failed to read tar payload of A")
+		}
+		bFile, err := io.ReadAll(bTar)
+		if err != nil {
+			t.Fatal("failed to read tar payload of B")
+		}
+		if !bytes.Equal(aFile, bFile) {
+			t.Logf("different tar payload (A = %q; B = %q)", string(aFile), string(bFile))
+			return false
+		}
+	}
+
+	return true
+}
+
+func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool {
+	aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller)
+	if err != nil {
+		t.Fatalf("failed to parse A: %v", err)
+	}
+	bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller)
+	if err != nil {
+		t.Fatalf("failed to parse B: %v", err)
+	}
+	t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC))
+	t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC))
+	return aJTOC.Version == bJTOC.Version
+}
+
+func isSameEntries(t *testing.T, a, b *Reader) bool {
+	aroot, ok := a.Lookup("")
+	if !ok {
+		t.Fatalf("failed to get root of A")
+	}
+	broot, ok := b.Lookup("")
+	if !ok {
+		t.Fatalf("failed to get root of B")
+	}
+	aEntry := stargzEntry{aroot, a}
+	bEntry := stargzEntry{broot, b}
+	return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
+}
+
+func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
+	buf := new(bytes.Buffer)
+	var w io.WriteCloser
+	var err error
+	if srcCompression == gzipType {
+		w = gzip.NewWriter(buf)
+	} else if srcCompression == zstdType {
+		w, err = zstd.NewWriter(buf)
+		if err != nil {
+			t.Fatalf("failed to init zstd writer: %v", err)
+		}
+	} else {
+		return src
+	}
+	src.Seek(0, io.SeekStart)
+	if _, err := io.Copy(w, src); err != nil {
+		t.Fatalf("failed to compress source")
+	}
+	if err := w.Close(); err != nil {
+		t.Fatalf("failed to finalize compress source")
+	}
+	data := buf.Bytes()
+	return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+type stargzEntry struct {
+	e *TOCEntry
+	r *Reader
+}
+
+// contains checks if all child entries in "b" are also contained in "a".
+// This function also checks if the files/chunks contain the same contents among "a" and "b".
+func contains(t *testing.T, a, b stargzEntry) bool {
+	ae, ar := a.e, a.r
+	be, br := b.e, b.r
+	t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
+	if !equalEntry(ae, be) {
+		t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be)
+		return false
+	}
+	if ae.Type == "dir" {
+		t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name,
+			allChildrenName(ae), allChildrenName(be))
+		iscontain := true
+		ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool {
+			// Walk through all files on this stargz file.
+
+			if aChild.Name == PrefetchLandmark ||
+				aChild.Name == NoPrefetchLandmark {
+				return true // Ignore landmarks
+			}
+
+			// Ignore a TOCEntry of "./" (formatted as "" by the stargz lib) on the root directory
+			// because this points to the root directory itself.
+ if aChild.Name == "" && ae.Name == "" { + return true + } + + bChild, ok := be.LookupChild(aBaseName) + if !ok { + t.Logf("%q (base: %q): not found in b: %v", + ae.Name, aBaseName, allChildrenName(be)) + iscontain = false + return false + } + + childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r}) + if !childcontain { + t.Logf("%q != %q: non-equal dir", ae.Name, be.Name) + iscontain = false + return false + } + return true + }) + return iscontain + } else if ae.Type == "reg" { + af, err := ar.OpenFile(ae.Name) + if err != nil { + t.Fatalf("failed to open file %q on A: %v", ae.Name, err) + } + bf, err := br.OpenFile(be.Name) + if err != nil { + t.Fatalf("failed to open file %q on B: %v", be.Name, err) + } + + var nr int64 + for nr < ae.Size { + abytes, anext, aok := readOffset(t, af, nr, a) + bbytes, bnext, bok := readOffset(t, bf, nr, b) + if !aok && !bok { + break + } else if !(aok && bok) || anext != bnext { + t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v", + ae.Name, be.Name, nr, aok, bok, anext, bnext) + return false + } + nr = anext + if !bytes.Equal(abytes, bbytes) { + t.Logf("%q != %q: different contents %v vs %v", + ae.Name, be.Name, string(abytes), string(bbytes)) + return false + } + } + return true + } + + return true +} + +func allChildrenName(e *TOCEntry) (children []string) { + e.ForeachChild(func(baseName string, _ *TOCEntry) bool { + children = append(children, baseName) + return true + }) + return +} + +func equalEntry(a, b *TOCEntry) bool { + // Here, we selectively compare fileds that we are interested in. + return a.Name == b.Name && + a.Type == b.Type && + a.Size == b.Size && + a.ModTime3339 == b.ModTime3339 && + a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time + a.LinkName == b.LinkName && + a.Mode == b.Mode && + a.UID == b.UID && + a.GID == b.GID && + a.Uname == b.Uname && + a.Gname == b.Gname && + (a.Offset > 0) == (b.Offset > 0) && + (a.NextOffset() > 0) == (b.NextOffset() > 0) && + a.DevMajor == b.DevMajor && + a.DevMinor == b.DevMinor && + a.NumLink == b.NumLink && + reflect.DeepEqual(a.Xattrs, b.Xattrs) && + // chunk-related infomations aren't compared in this function. 
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"` + // ChunkSize int64 `json:"chunkSize,omitempty"` + // children map[string]*TOCEntry + a.Digest == b.Digest +} + +func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { + ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) + if !ok { + return nil, 0, false + } + data := make([]byte, ce.ChunkSize) + t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset()) + n, err := r.ReadAt(data, ce.ChunkOffset) + if err != nil { + t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v", + e.e.Name, ce.ChunkOffset, ce.ChunkSize, err) + } + if int64(n) != ce.ChunkSize { + t.Fatalf("unexpected copied data size %d; want %d", + n, ce.ChunkSize) + } + return data[:n], offset + ce.ChunkSize, true +} + +func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { + jtocData, err := json.Marshal(*tocJSON) + if err != nil { + t.Fatalf("failed to marshal TOC JSON: %v", err) + } + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil { + t.Fatalf("failed to read toc json blob: %v", err) + } + return buf.String() +} + +const chunkSize = 3 + +// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) + +// testDigestAndVerify runs specified checks against sample stargz blobs. +func testDigestAndVerify(t *testing.T, controllers ...TestingController) { + tests := []struct { + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + }{ + { + name: "no-regfile", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + dir("test/"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + dir("test2/"), // modified + ), allowedPrefix[0])), + }, + }, + { + name: "small-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "bbb", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", ""), + file("foo.txt", "M"), // modified + dir("test/"), + file("test/bar.txt", "bbb"), + ), allowedPrefix[0])), + // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO + checkVerifyBrokenContentFail("foo.txt"), + }, + }, + { + name: "big-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified + file("foo.txt", "a"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + { + name: "with-non-regfiles", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", 
"bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + symlink("barlink", "test/bar.txt"), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo.txt", "a"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + dir("test2/"), + link("test2/bazlink", "foo.txt"), // modified + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + } + + for _, tt := range tests { + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, cl := range controllers { + cl := cl + for _, prefix := range allowedPrefix { + prefix := prefix + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) { + // Get original tar file and chunk digests + dgstMap := make(map[string]digest.Digest) + tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) + + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to convert stargz: %v", err) + } + tocDigest := rc.TOCDigest() + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + newStargz := buf.Bytes() + // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. + dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) + + for _, check := range tt.checks { + check(t, newStargz, tocDigest, dgstMap, cl) + } + }) + } + } + } + } + } +} + +// checkStargzTOC checks the TOC JSON of the passed stargz has the expected +// digest and contains valid chunks. It walks all entries in the stargz and +// checks all chunk digests stored to the TOC JSON match the actual contents. +func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Errorf("failed to parse converted stargz: %v", err) + return + } + digestMapTOC, err := listDigests(io.NewSectionReader( + bytes.NewReader(sgzData), 0, int64(len(sgzData))), + controller, + ) + if err != nil { + t.Fatalf("failed to list digest: %v", err) + } + found := make(map[string]bool) + for id := range dgstMap { + found[id] = false + } + zr, err := controller.Reader(bytes.NewReader(sgzData)) + if err != nil { + t.Fatalf("failed to decompress converted stargz: %v", err) + } + defer zr.Close() + tr := tar.NewReader(zr) + for { + h, err := tr.Next() + if err != nil { + if err != io.EOF { + t.Errorf("failed to read tar entry: %v", err) + return + } + break + } + if h.Name == TOCTarName { + // Check the digest of TOC JSON based on the actual contents + // It's sure that TOC JSON exists in this archive because + // Open succeeded. 
+			dgstr := digest.Canonical.Digester()
+			if _, err := io.Copy(dgstr.Hash(), tr); err != nil {
+				t.Fatalf("failed to calculate digest of TOC JSON: %v",
+					err)
+			}
+			if dgstr.Digest() != tocDigest {
+				t.Errorf("invalid TOC JSON %q; want %q", dgstr.Digest(), tocDigest)
+			}
+			continue
+		}
+		if _, ok := sgz.Lookup(h.Name); !ok {
+			t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+			return
+		}
+		var n int64
+		for n < h.Size {
+			ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+			if !ok {
+				t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+					h.Name, n)
+				return
+			}
+
+			// Get the original digest to make sure the file contents are kept unchanged
+			// from the original tar, during the whole conversion steps.
+			id := chunkID(h.Name, n, ce.ChunkSize)
+			want, ok := dgstMap[id]
+			if !ok {
+				t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v",
+					h.Name, n, ce.ChunkSize, dgstMap)
+				return
+			}
+			found[id] = true
+
+			// Check the file contents
+			dgstr := digest.Canonical.Digester()
+			if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil {
+				t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)",
+					h.Name, n, ce.ChunkSize)
+			}
+			if want != dgstr.Digest() {
+				t.Errorf("Invalid contents in converted stargz %q: %q; want %q",
+					h.Name, dgstr.Digest(), want)
+				return
+			}
+
+			// Check the digest stored in TOC JSON
+			dgstTOC, ok := digestMapTOC[ce.Offset]
+			if !ok {
+				t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered",
+					h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset)
+			}
+			if want != dgstTOC {
+				t.Errorf("Invalid digest in TOCEntry %q: %q; want %q",
+					h.Name, dgstTOC, want)
+				return
+			}
+
+			n += ce.ChunkSize
+		}
+	}
+
+	for id, ok := range found {
+		if !ok {
+			t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+		}
+	}
+}
+
+// checkVerifyTOC checks that verification works for the TOC JSON of the passed
+// stargz. It walks all entries in the stargz and checks that verification of
+// all chunks works.
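+//
+// The chunk walk that this check and checkStargzTOC rely on boils down to the
+// following (editorial sketch; name and size come from the tar header under test):
+//
+//	var n int64
+//	for n < size {
+//		ce, ok := sgz.ChunkEntryForOffset(name, n)
+//		if !ok {
+//			break // chunk lost
+//		}
+//		// ... verify the ce.ChunkSize bytes of this chunk ...
+//		n += ce.ChunkSize
+//	}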
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Errorf("failed to parse converted stargz: %v", err) + return + } + ev, err := sgz.VerifyTOC(tocDigest) + if err != nil { + t.Errorf("failed to verify stargz: %v", err) + return + } + + found := make(map[string]bool) + for id := range dgstMap { + found[id] = false + } + zr, err := controller.Reader(bytes.NewReader(sgzData)) + if err != nil { + t.Fatalf("failed to decompress converted stargz: %v", err) + } + defer zr.Close() + tr := tar.NewReader(zr) + for { + h, err := tr.Next() + if err != nil { + if err != io.EOF { + t.Errorf("failed to read tar entry: %v", err) + return + } + break + } + if h.Name == TOCTarName { + continue + } + if _, ok := sgz.Lookup(h.Name); !ok { + t.Errorf("lost stargz entry %q in the converted TOC", h.Name) + return + } + var n int64 + for n < h.Size { + ce, ok := sgz.ChunkEntryForOffset(h.Name, n) + if !ok { + t.Errorf("lost chunk %q(offset=%d) in the converted TOC", + h.Name, n) + return + } + + v, err := ev.Verifier(ce) + if err != nil { + t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n) + } + + found[chunkID(h.Name, n, ce.ChunkSize)] = true + + // Check the file contents + if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil { + t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)", + h.Name, n, ce.ChunkSize) + } + if !v.Verified() { + t.Errorf("Invalid contents in converted stargz %q (should be succeeded)", + h.Name) + return + } + n += ce.ChunkSize + } + } + + for id, ok := range found { + if !ok { + t.Errorf("required chunk %q not found in the converted stargz: %v", id, found) + } + } +} + +// checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be +// detected during the verification and the verification returns an error. 
+func checkVerifyInvalidTOCEntryFail(filename string) check { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + funcs := map[string]rewriteFunc{ + "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { + var found bool + for _, e := range toc.Entries { + if cleanEntryName(e.Name) == filename { + if e.Type != "reg" && e.Type != "chunk" { + t.Fatalf("entry %q to break must be regfile or chunk", filename) + } + if e.ChunkDigest == "" { + t.Fatalf("entry %q is already invalid", filename) + } + e.ChunkDigest = "" + found = true + } + } + if !found { + t.Fatalf("rewrite target not found") + } + }, + "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { + var ( + sampleEntry *TOCEntry + targetEntry *TOCEntry + ) + for _, e := range toc.Entries { + if e.Type == "reg" || e.Type == "chunk" { + if cleanEntryName(e.Name) == filename { + targetEntry = e + } else { + sampleEntry = e + } + } + } + if sampleEntry == nil { + t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target") + } + if targetEntry == nil { + t.Fatalf("rewrite target not found") + } + targetEntry.Offset = sampleEntry.Offset + }, + } + + for name, rFunc := range funcs { + t.Run(name, func(t *testing.T) { + newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller) + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, newSgz); err != nil { + t.Fatalf("failed to get converted stargz") + } + isgz := buf.Bytes() + + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))), + WithDecompressors(controller), + ) + if err != nil { + t.Fatalf("failed to parse converted stargz: %v", err) + return + } + _, err = sgz.VerifyTOC(newTocDigest) + if err == nil { + t.Errorf("must fail for invalid TOC") + return + } + }) + } + } +} + +// checkVerifyInvalidStargzFail checks if the verification detects that the +// given stargz file doesn't match to the expected digest and returns error. +func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { + return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller)) + if err != nil { + t.Fatalf("failed to convert stargz: %v", err) + } + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + mStargz := buf.Bytes() + + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))), + WithDecompressors(controller), + ) + if err != nil { + t.Fatalf("failed to parse converted stargz: %v", err) + return + } + _, err = sgz.VerifyTOC(tocDigest) + if err == nil { + t.Errorf("must fail for invalid TOC") + return + } + } +} + +// checkVerifyBrokenContentFail checks if the verifier detects broken contents +// that doesn't match to the expected digest and returns error. 
+func checkVerifyBrokenContentFail(filename string) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+		// Parse stargz file
+		sgz, err := Open(
+			io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+			WithDecompressors(controller),
+		)
+		if err != nil {
+			t.Fatalf("failed to parse converted stargz: %v", err)
+			return
+		}
+		ev, err := sgz.VerifyTOC(tocDigest)
+		if err != nil {
+			t.Fatalf("failed to verify stargz: %v", err)
+			return
+		}
+
+		// Open the target file
+		sr, err := sgz.OpenFile(filename)
+		if err != nil {
+			t.Fatalf("failed to open file %q", filename)
+		}
+		ce, ok := sgz.ChunkEntryForOffset(filename, 0)
+		if !ok {
+			t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0)
+			return
+		}
+		if ce.ChunkSize == 0 {
+			t.Fatalf("file mustn't be empty")
+			return
+		}
+		data := make([]byte, ce.ChunkSize)
+		if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil {
+			t.Errorf("failed to get data of a chunk of %q(offset=%d)",
+				filename, ce.ChunkOffset)
+		}
+
+		// Check the broken chunk (must fail)
+		v, err := ev.Verifier(ce)
+		if err != nil {
+			t.Fatalf("failed to get verifier for %q", filename)
+		}
+		broken := append([]byte{^data[0]}, data[1:]...)
+		if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil {
+			t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+				filename, ce.ChunkOffset, ce.ChunkSize)
+		}
+		if v.Verified() {
+			t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)",
+				filename, data, broken)
+		}
+	}
+}
+
+func chunkID(name string, offset, size int64) string {
+	return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
+}
+
+type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+
+func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+	decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
+	if err != nil {
+		t.Fatalf("failed to extract TOC JSON: %v", err)
+	}
+
+	rewrite(t, decodedJTOC, sgz)
+
+	tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset)
+	if err != nil {
+		t.Fatalf("failed to create toc and footer: %v", err)
+	}
+
+	// Reconstruct stargz file with the modified TOC JSON
+	if _, err := sgz.Seek(0, io.SeekStart); err != nil {
+		t.Fatalf("failed to reset the seek position of stargz: %v", err)
+	}
+	return io.MultiReader(
+		io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON)
+		tocFooter,                       // Rewritten TOC and footer
+	), tocDigest
+}
+
+func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) {
+	decodedJTOC, _, err := parseStargz(sgz, controller)
+	if err != nil {
+		return nil, err
+	}
+	digestMap := make(map[int64]digest.Digest)
+	for _, e := range decodedJTOC.Entries {
+		if e.Type == "reg" || e.Type == "chunk" {
+			if e.Type == "reg" && e.Size == 0 {
+				continue // ignores empty file
+			}
+			if e.ChunkDigest == "" {
+				return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
+					e.Name, e.Offset)
+			}
+			d, err := digest.Parse(e.ChunkDigest)
+			if err != nil {
+				return nil, err
+			}
+			digestMap[e.Offset] = d
+		}
+	}
+	return digestMap, nil
+}
+
+func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
+	fSize := controller.FooterSize()
+	footer := make([]byte, fSize)
+	if _, err := sgz.ReadAt(footer,
sgz.Size()-fSize); err != nil { + return nil, 0, fmt.Errorf("error reading footer: %w", err) + } + _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse footer: %w", err) + } + + // Decode the TOC JSON + tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + decodedJTOC, _, err = controller.ParseTOC(tocReader) + if err != nil { + return nil, 0, fmt.Errorf("failed to parse TOC: %w", err) + } + return decodedJTOC, tocOffset, nil +} + +func testWriteAndOpen(t *testing.T, controllers ...TestingController) { + const content = "Some contents" + invalidUtf8 := "\xff\xfe\xfd" + + xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} + sampleOwner := owner{uid: 50, gid: 100} + + tests := []struct { + name string + chunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams + + wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz + wantFailOnLossLess bool + }{ + { + name: "empty", + in: tarOf(), + wantNumGz: 2, // empty tar + TOC + footer + wantNumGzLossLess: 3, // empty tar + TOC + footer + want: checks( + numTOCEntries(0), + ), + }, + { + name: "1dir_1empty_file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", ""), + ), + wantNumGz: 3, // dir, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", 0), + entryHasChildren("foo", "bar.txt"), + hasFileDigest("foo/bar.txt", digestFor("")), + ), + }, + { + name: "1dir_1file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", content, xAttrFile), + ), + wantNumGz: 4, // var dir, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + hasFileDigest("foo/bar.txt", digestFor(content)), + hasFileContentsRange("foo/bar.txt", 0, content), + hasFileContentsRange("foo/bar.txt", 1, content[1:]), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar.txt"), + hasFileXattrs("foo/bar.txt", "foo", "bar"), + hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8), + ), + }, + { + name: "2meta_2file", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + }, + { + name: "3dir", + in: tarOf( + dir("bar/"), + dir("foo/"), + dir("foo/bar/"), + ), + wantNumGz: 3, // 3 dirs, TOC, footer + want: checks( + hasDirLinkCount("bar/", 2), + hasDirLinkCount("foo/", 3), + hasDirLinkCount("foo/bar/", 2), + ), + }, + { + name: "symlink", + in: tarOf( + dir("foo/"), + symlink("foo/bar", "../../x"), + ), + wantNumGz: 3, // metas + TOC + footer + want: checks( + numTOCEntries(2), + hasSymlink("foo/bar", "../../x"), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar"), + ), + }, + { + name: "chunked_file", + chunkSize: 4, + in: tarOf( + dir("foo/"), + file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), + ), + wantNumGz: 9, + want: checks( + numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file + hasDir("foo/"), + hasFileLen("foo/big.txt", len("This is 
such a big file")), + hasFileDigest("foo/big.txt", digestFor("This is such a big file")), + hasFileContentsRange("foo/big.txt", 0, "This is such a big file"), + hasFileContentsRange("foo/big.txt", 1, "his is such a big file"), + hasFileContentsRange("foo/big.txt", 2, "is is such a big file"), + hasFileContentsRange("foo/big.txt", 3, "s is such a big file"), + hasFileContentsRange("foo/big.txt", 4, " is such a big file"), + hasFileContentsRange("foo/big.txt", 5, "is such a big file"), + hasFileContentsRange("foo/big.txt", 6, "s such a big file"), + hasFileContentsRange("foo/big.txt", 7, " such a big file"), + hasFileContentsRange("foo/big.txt", 8, "such a big file"), + hasFileContentsRange("foo/big.txt", 9, "uch a big file"), + hasFileContentsRange("foo/big.txt", 10, "ch a big file"), + hasFileContentsRange("foo/big.txt", 11, "h a big file"), + hasFileContentsRange("foo/big.txt", 12, " a big file"), + hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""), + hasChunkEntries("foo/big.txt", 6), + ), + }, + { + name: "recursive", + in: tarOf( + dir("/", sampleOwner), + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // dirs, bar.txt alone, TOC, footer + want: checks( + maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt" + ), + }, + { + name: "block_char_fifo", + in: tarOf( + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "b", + Typeflag: tar.TypeBlock, + Devmajor: 123, + Devminor: 456, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "c", + Typeflag: tar.TypeChar, + Devmajor: 111, + Devminor: 222, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "f", + Typeflag: tar.TypeFifo, + Format: format, + }) + }), + ), + wantNumGz: 3, + want: checks( + lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}), + lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}), + lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}), + ), + }, + { + name: "modes", + in: tarOf( + dir("foo1/", 0755|os.ModeDir|os.ModeSetgid), + file("foo1/bar1", content, 0700|os.ModeSetuid), + file("foo1/bar2", content, 0755|os.ModeSetgid), + dir("foo2/", 0755|os.ModeDir|os.ModeSticky), + file("foo2/bar3", content, 0755|os.ModeSticky), + dir("foo3/", 0755|os.ModeDir), + file("foo3/bar4", content, os.FileMode(0700)), + file("foo3/bar5", content, os.FileMode(0755)), + ), + wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer + want: checks( + hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid), + hasMode("foo1/bar1", 0700|os.ModeSetuid), + hasMode("foo1/bar2", 0755|os.ModeSetgid), + hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky), + hasMode("foo2/bar3", 0755|os.ModeSticky), + hasMode("foo3/", 0755|os.ModeDir), + hasMode("foo3/bar4", os.FileMode(0700)), + hasMode("foo3/bar5", os.FileMode(0755)), + ), + }, + { + name: "lossy", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + file(TOCTarName, "dummy"), // ignored by the writer. 
(lossless write returns an error)
+			),
+			wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+			want: checks(
+				numTOCEntries(3),
+				hasDir("bar/"),
+				hasDir("foo/"),
+				hasFileLen("foo/bar.txt", len(content)),
+				entryHasChildren("", "bar", "foo"),
+				entryHasChildren("foo", "bar.txt"),
+				hasChunkEntries("foo/bar.txt", 1),
+				hasEntryOwner("bar/", sampleOwner),
+				hasEntryOwner("foo/", sampleOwner),
+				hasEntryOwner("foo/bar.txt", sampleOwner),
+			),
+			wantFailOnLossLess: true,
+		},
+		{
+			name: "hardlink should be replaced with the destination entry",
+			in: tarOf(
+				dir("foo/"),
+				file("foo/foo1", "test"),
+				link("foolink", "foo/foo1"),
+			),
+			wantNumGz: 4, // dir, foo1 + link, TOC, footer
+			want: checks(
+				mustSameEntry("foo/foo1", "foolink"),
+			),
+		},
+	}
+
+	for _, tt := range tests {
+		for _, cl := range controllers {
+			cl := cl
+			for _, prefix := range allowedPrefix {
+				prefix := prefix
+				for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+					srcTarFormat := srcTarFormat
+					for _, lossless := range []bool{true, false} {
+						t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) {
+							var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
+							origTarDgstr := digest.Canonical.Digester()
+							tr = io.TeeReader(tr, origTarDgstr.Hash())
+							var stargzBuf bytes.Buffer
+							w := NewWriterWithCompressor(&stargzBuf, cl)
+							w.ChunkSize = tt.chunkSize
+							if lossless {
+								err := w.AppendTarLossLess(tr)
+								if tt.wantFailOnLossLess {
+									if err != nil {
+										return // expected to fail
+									}
+									t.Fatalf("Append wanted to fail on lossless")
+								}
+								if err != nil {
+									t.Fatalf("Append(lossless): %v", err)
+								}
+							} else {
+								if err := w.AppendTar(tr); err != nil {
+									t.Fatalf("Append: %v", err)
+								}
+							}
+							if _, err := w.Close(); err != nil {
+								t.Fatalf("Writer.Close: %v", err)
+							}
+							b := stargzBuf.Bytes()
+
+							if lossless {
+								// Check if the result blob preserves the original tar metadata
+								rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl)
+								if err != nil {
+									t.Errorf("failed to decompress blob: %v", err)
+									return
+								}
+								defer rc.Close()
+								resultDgstr := digest.Canonical.Digester()
+								if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
+									t.Errorf("failed to read result decompressed blob: %v", err)
+									return
+								}
+								if resultDgstr.Digest() != origTarDgstr.Digest() {
+									t.Errorf("lossy compression occurred: digest=%v; want %v",
+										resultDgstr.Digest(), origTarDgstr.Digest())
+									return
+								}
+							}
+
+							diffID := w.DiffID()
+							wantDiffID := cl.DiffIDOf(t, b)
+							if diffID != wantDiffID {
+								t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+							}
+
+							got := cl.CountStreams(t, b)
+							wantNumGz := tt.wantNumGz
+							if lossless && tt.wantNumGzLossLess > 0 {
+								wantNumGz = tt.wantNumGzLossLess
+							}
+							if got != wantNumGz {
+								t.Errorf("number of streams = %d; want %d", got, wantNumGz)
+							}
+
+							telemetry, checkCalled := newCalledTelemetry()
+							r, err := Open(
+								io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
+								WithDecompressors(cl),
+								WithTelemetry(telemetry),
+							)
+							if err != nil {
+								t.Fatalf("stargz.Open: %v", err)
+							}
+							if err := checkCalled(); err != nil {
+								t.Errorf("telemetry failure: %v", err)
+							}
+							for _, want := range tt.want {
+								want.check(t, r)
+							}
+						})
+					}
+				}
+			}
+		}
+	}
+}
+
+func newCalledTelemetry() (telemetry *Telemetry, check func() error) {
+	var getFooterLatencyCalled bool
+	var getTocLatencyCalled bool
+	var deserializeTocLatencyCalled bool
+	return &Telemetry{
+		func(time.Time) { 
getFooterLatencyCalled = true }, + func(time.Time) { getTocLatencyCalled = true }, + func(time.Time) { deserializeTocLatencyCalled = true }, + }, func() error { + var allErr []error + if !getFooterLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called")) + } + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } + if !deserializeTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) + } + return errorutil.Aggregate(allErr) + } +} + +func digestFor(content string) string { + sum := sha256.Sum256([]byte(content)) + return fmt.Sprintf("sha256:%x", sum) +} + +type numTOCEntries int + +func (n numTOCEntries) check(t *testing.T, r *Reader) { + if r.toc == nil { + t.Fatal("nil TOC") + } + if got, want := len(r.toc.Entries), int(n); got != want { + t.Errorf("got %d TOC entries; want %d", got, want) + } + t.Logf("got TOC entries:") + for i, ent := range r.toc.Entries { + entj, _ := json.Marshal(ent) + t.Logf(" [%d]: %s\n", i, entj) + } + if t.Failed() { + t.FailNow() + } +} + +func checks(s ...stargzCheck) []stargzCheck { return s } + +type stargzCheck interface { + check(t *testing.T, r *Reader) +} + +type stargzCheckFn func(*testing.T, *Reader) + +func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } + +func maxDepth(max int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup("") + if !ok { + t.Fatal("root directory not found") + } + d, err := getMaxDepth(t, e, 0, 10*max) + if err != nil { + t.Errorf("failed to get max depth (wanted %d): %v", max, err) + return + } + if d != max { + t.Errorf("invalid depth %d; want %d", d, max) + return + } + }) +} + +func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { + if current > limit { + return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", + current, limit) + } + max = current + e.ForeachChild(func(baseName string, ent *TOCEntry) bool { + t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name) + d, err := getMaxDepth(t, ent, current+1, limit) + if err != nil { + rErr = err + return false + } + if d > max { + max = d + } + return true + }) + return +} + +func hasFileLen(file string, wantLen int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } else if ent.Size != int64(wantLen) { + t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen) + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileXattrs(file, name, value string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } + if ent.Xattrs == nil { + t.Errorf("file %q has no xattrs", file) + return + } + valueFound, found := ent.Xattrs[name] + if !found { + t.Errorf("file %q has no xattr %q", file, name) + return + } + if string(valueFound) != value { + t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value) + } + + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileDigest(file string, digest string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + 
t.Fatalf("didn't find TOCEntry for file %q", file) + } + if ent.Digest != digest { + t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest) + } + }) +} + +func hasFileContentsRange(file string, offset int, want string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + f, err := r.OpenFile(file) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want) + } + }) +} + +func hasChunkEntries(file string, wantChunks int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("no file for %q", file) + } + if ent.Type != "reg" { + t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type) + } + chunks := r.getChunks(ent) + if len(chunks) != wantChunks { + t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks) + return + } + f := chunks[0] + + var gotChunks []*TOCEntry + var last *TOCEntry + for off := int64(0); off < f.Size; off++ { + e, ok := r.ChunkEntryForOffset(file, off) + if !ok { + t.Errorf("no ChunkEntryForOffset at %d", off) + return + } + if last != e { + gotChunks = append(gotChunks, e) + last = e + } + } + if !reflect.DeepEqual(chunks, gotChunks) { + t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks) + } + + // And verify the NextOffset + for i := 0; i < len(gotChunks)-1; i++ { + ci := gotChunks[i] + cnext := gotChunks[i+1] + if ci.NextOffset() != cnext.Offset { + t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset) + } + } + }) +} + +func entryHasChildren(dir string, want ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + want := append([]string(nil), want...) 
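+		// (appending to a nil slice copies the variadic arguments, so the
+		// sort below cannot mutate the caller's backing array)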
+ var got []string + ent, ok := r.Lookup(dir) + if !ok { + t.Fatalf("didn't find TOCEntry for dir node %q", dir) + } + for baseName := range ent.children { + got = append(got, baseName) + } + sort.Strings(got) + sort.Strings(want) + if !reflect.DeepEqual(got, want) { + t.Errorf("children of %q = %q; want %q", dir, got, want) + } + }) +} + +func hasDir(file string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasDirLinkCount(file string, count int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + return + } + if ent.NumLink != count { + t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasMode(file string, mode os.FileMode) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Stat().Mode() != mode { + t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode) + return + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasSymlink(file, target string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "symlink" { + t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type) + } else if ent.LinkName != target { + t.Errorf("link target of symlink %q is %q; want %q", file, ent.LinkName, target) + } + return + } + } + t.Errorf("symlink %q not found", file) + }) +} + +func lookupMatch(name string, want *TOCEntry) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup(name) + if !ok { + t.Fatalf("failed to Lookup entry %q", name) + } + if !reflect.DeepEqual(e, want) { + t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want) + } + + }) +} + +func hasEntryOwner(entry string, owner owner) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) + if !ok { + t.Errorf("entry %q not found", entry) + return + } + if ent.UID != owner.uid || ent.GID != owner.gid { + t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid) + return + } + }) +} + +func mustSameEntry(files ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + var first *TOCEntry + for _, f := range files { + if first == nil { + var ok bool + first, ok = r.Lookup(f) + if !ok { + t.Errorf("unknown first file on Lookup: %q", f) + return + } + } + + // Test Lookup + e, ok := r.Lookup(f) + if !ok { + t.Errorf("unknown file on Lookup: %q", f) + return + } + if e != first { + t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first) + return + } + + // Test LookupChild + pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get parent of %q", f) + return + } + e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f))) + if !ok { + t.Errorf("failed to get %q as the child of %+v", f, pe) + return + } + if e != first { + 
t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
+				return
+			}
+
+			// Test ForeachChild
+			pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
+				if baseName == filepath.Base(filepath.Clean(f)) {
+					if e != first {
+						t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
+						return false
+					}
+				}
+				return true
+			})
+		}
+	})
+}
+
+func tarOf(s ...tarEntry) []tarEntry { return s }
+
+type tarEntry interface {
+	appendTar(tw *tar.Writer, prefix string, format tar.Format) error
+}
+
+type tarEntryFunc func(*tar.Writer, string, tar.Format) error
+
+func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error {
+	return f(tw, prefix, format)
+}
+
+func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+	format := tar.FormatUnknown
+	for _, opt := range opts {
+		switch v := opt.(type) {
+		case tar.Format:
+			format = v
+		default:
+			panic(fmt.Errorf("unsupported opt for buildTar: %v", opt))
+		}
+	}
+	buf := new(bytes.Buffer)
+	tw := tar.NewWriter(buf)
+	for _, ent := range ents {
+		if err := ent.appendTar(tw, prefix, format); err != nil {
+			t.Fatalf("building input tar: %v", err)
+		}
+	}
+	if err := tw.Close(); err != nil {
+		t.Errorf("closing write of input tar: %v", err)
+	}
+	data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to check that lossless mode preserves them
+	return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+}
+
+func dir(name string, opts ...interface{}) tarEntry {
+	return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error {
+		var o owner
+		mode := os.FileMode(0755)
+		for _, opt := range opts {
+			switch v := opt.(type) {
+			case owner:
+				o = v
+			case os.FileMode:
+				mode = v
+			default:
+				return errors.New("unsupported opt")
+			}
+		}
+		if !strings.HasSuffix(name, "/") {
+			panic(fmt.Sprintf("missing trailing slash in dir %q", name))
+		}
+		tm, err := fileModeToTarMode(mode)
+		if err != nil {
+			return err
+		}
+		return tw.WriteHeader(&tar.Header{
+			Typeflag: tar.TypeDir,
+			Name:     prefix + name,
+			Mode:     tm,
+			Uid:      o.uid,
+			Gid:      o.gid,
+			Format:   format,
+		})
+	})
+}
+
+// xAttr are extended attributes to set on test files created with the file func.
+type xAttr map[string]string
+
+// owner is the owner to set on test files and directories with the file and dir functions.
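+//
+// For example, a fixture owned by a non-root user can be declared as
+// (an illustrative sketch mirroring how sampleOwner is used in the tests):
+//
+//	tarOf(
+//		dir("data/", owner{uid: 50, gid: 100}),
+//		file("data/hello.txt", "hello", owner{uid: 50, gid: 100}),
+//	)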
+type owner struct { + uid int + gid int +} + +func file(name, contents string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var xattrs xAttr + var o owner + mode := os.FileMode(0644) + for _, opt := range opts { + switch v := opt.(type) { + case xAttr: + xattrs = v + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if strings.HasSuffix(name, "/") { + return fmt.Errorf("bogus trailing slash in file %q", name) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + if len(xattrs) > 0 { + format = tar.FormatPAX // only PAX supports xattrs + } + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Mode: tm, + Xattrs: xattrs, + Size: int64(len(contents)), + Uid: o.uid, + Gid: o.gid, + Format: format, + }); err != nil { + return err + } + _, err = io.WriteString(tw, contents) + return err + }) +} + +func symlink(name, target string) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeSymlink, + Name: prefix + name, + Linkname: target, + Mode: 0644, + Format: format, + }) + }) +} + +func link(name string, linkname string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeLink, + Name: prefix + name, + Linkname: linkname, + ModTime: now, + Format: format, + }) + }) +} + +func chardev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeChar, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} + +func blockdev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeBlock, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} +func fifo(name string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeFifo, + Name: prefix + name, + ModTime: now, + Format: format, + }) + }) +} + +func prefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: PrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func noPrefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: NoPrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func regDigest(t *testing.T, name string, contentStr 
string, digestMap map[string]digest.Digest) tarEntry { + if digestMap == nil { + t.Fatalf("digest map mustn't be nil") + } + content := []byte(contentStr) + + var n int64 + for n < int64(len(content)) { + size := int64(chunkSize) + remain := int64(len(content)) - n + if remain < size { + size = remain + } + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil { + t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)", + string(content[n:n+size]), name, n, size) + } + digestMap[chunkID(name, n, size)] = dgstr.Digest() + n += size + } + + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Size: int64(len(content)), + Format: format, + }); err != nil { + return err + } + if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil { + return err + } + return nil + }) +} + +func fileModeToTarMode(mode os.FileMode) (int64, error) { + h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") + if err != nil { + return 0, err + } + return h.Mode, nil +} + +// fileInfoOnlyMode is os.FileMode that populates only file mode. +type fileInfoOnlyMode os.FileMode + +func (f fileInfoOnlyMode) Name() string { return "" } +func (f fileInfoOnlyMode) Size() int64 { return 0 } +func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } +func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } +func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } +func (f fileInfoOnlyMode) Sys() interface{} { return nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go new file mode 100644 index 00000000..3bc74463 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -0,0 +1,317 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "hash" + "io" + "os" + "path" + "time" + + digest "github.com/opencontainers/go-digest" +) + +const ( + // TOCTarName is the name of the JSON file in the tar archive in the + // table of contents gzip stream. + TOCTarName = "stargz.index.json" + + // FooterSize is the number of bytes in the footer + // + // The footer is an empty gzip stream with no compression and an Extra + // header of the form "%016xSTARGZ", where the 64 bit hex-encoded + // number is the offset to the gzip stream of JSON TOC. 
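+	//
+	// A footer matching the byte layout enumerated below can be reproduced
+	// with the standard library alone (an illustrative sketch, not this
+	// package's actual encoder):
+	//
+	//	buf := new(bytes.Buffer)
+	//	gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression)
+	//	subfield := fmt.Sprintf("%016xSTARGZ", tocOffset)
+	//	gz.Header.Extra = append([]byte{'S', 'G', byte(len(subfield)), 0}, subfield...)
+	//	gz.Close() // buf now holds the 51-byte footer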
+ // + // 51 comes from: + // + // 10 bytes gzip header + // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + // 2 bytes Extra: SI1 = 'S', SI2 = 'G' + // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + // 5 bytes flate header + // 8 bytes gzip footer + // (End of the eStargz blob) + // + // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz. + FooterSize = 51 + + // legacyFooterSize is the number of bytes in the legacy stargz footer. + // + // 47 comes from: + // + // 10 byte gzip header + + // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" + + // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset)) + // 5 byte flate header + // 8 byte gzip footer (two little endian uint32s: digest, size) + legacyFooterSize = 47 + + // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the + // digest of the TOC JSON. + // This annotation is valid only when it is specified in `.[]layers.annotations` + // of an image manifest. + TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" + + // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy + // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size + // to the runtime but current OCI image doesn't ship this information by default. So we store this + // to the special annotation. + StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size" + + // PrefetchLandmark is a file entry which indicates the end position of + // prefetch in the stargz file. + PrefetchLandmark = ".prefetch.landmark" + + // NoPrefetchLandmark is a file entry which indicates that no prefetch should + // occur in the stargz file. + NoPrefetchLandmark = ".no.prefetch.landmark" + + landmarkContents = 0xf +) + +// JTOC is the JSON-serialized table of contents index of the files in the stargz file. +type JTOC struct { + Version int `json:"version"` + Entries []*TOCEntry `json:"entries"` +} + +// TOCEntry is an entry in the stargz file's TOC (Table of Contents). +type TOCEntry struct { + // Name is the tar entry's name. It is the complete path + // stored in the tar file, not just the base name. + Name string `json:"name"` + + // Type is one of "dir", "reg", "symlink", "hardlink", "char", + // "block", "fifo", or "chunk". + // The "chunk" type is used for regular file data chunks past the first + // TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset, + // ChunkOffset, and ChunkSize populated. + Type string `json:"type"` + + // Size, for regular files, is the logical size of the file. + Size int64 `json:"size,omitempty"` + + // ModTime3339 is the modification time of the tar entry. Empty + // means zero or unknown. Otherwise it's in UTC RFC3339 + // format. Use the ModTime method to access the time.Time value. + ModTime3339 string `json:"modtime,omitempty"` + modTime time.Time + + // LinkName, for symlinks and hardlinks, is the link target. + LinkName string `json:"linkName,omitempty"` + + // Mode is the permission and mode bits. + Mode int64 `json:"mode,omitempty"` + + // UID is the user ID of the owner. + UID int `json:"uid,omitempty"` + + // GID is the group ID of the owner. + GID int `json:"gid,omitempty"` + + // Uname is the username of the owner. 
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same UID.
+	Uname string `json:"userName,omitempty"`
+
+	// Gname is the group name of the owner.
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same GID.
+	Gname string `json:"groupName,omitempty"`
+
+	// Offset, for regular files, provides the offset in the
+	// stargz file to the file's data bytes. See ChunkOffset and
+	// ChunkSize.
+	Offset int64 `json:"offset,omitempty"`
+
+	nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+	// DevMajor is the major device number for "char" and "block" types.
+	DevMajor int `json:"devMajor,omitempty"`
+
+	// DevMinor is the minor device number for "char" and "block" types.
+	DevMinor int `json:"devMinor,omitempty"`
+
+	// NumLink is the number of entry names pointing to this entry.
+	// Zero means one name references this entry.
+	// This field is calculated during runtime and not recorded in TOC JSON.
+	NumLink int `json:"-"`
+
+	// Xattrs are the extended attributes for the entry.
+	Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+	// Digest stores the OCI checksum for regular file payloads.
+	// It has the form "sha256:abcdef01234....".
+	Digest string `json:"digest,omitempty"`
+
+	// ChunkOffset is non-zero if this is a chunk of a large,
+	// regular file. If so, the Offset is where the gzip header of
+	// ChunkSize bytes at ChunkOffset in Name begins.
+	//
+	// In serialized form, a "chunkSize" JSON field of zero means
+	// that the chunk goes to the end of the file. After reading
+	// from the stargz TOC, though, the ChunkSize is initialized
+	// to a non-zero value for when Type is either "reg" or
+	// "chunk".
+	ChunkOffset int64 `json:"chunkOffset,omitempty"`
+	ChunkSize   int64 `json:"chunkSize,omitempty"`
+
+	// ChunkDigest stores an OCI digest of the chunk. This must be formed
+	// as "sha256:0123abcd...".
+	ChunkDigest string `json:"chunkDigest,omitempty"`
+
+	children map[string]*TOCEntry
+}
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+	if e.children == nil {
+		e.children = make(map[string]*TOCEntry)
+	}
+	if child.Type == "dir" {
+		e.NumLink++ // Entry ".." in the subdirectory links to this directory
+	}
+	e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+	for name, ent := range e.children {
+		if !f(name, ent) {
+			return
+		}
+	}
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+	child, ok = e.children[baseName]
+	return
+}
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
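+// Because Sys() exposes that entry, a caller holding only the os.FileInfo
+// returned by TOCEntry.Stat can recover the full entry again, e.g.
+// (an illustrative sketch):
+//
+//	if ent, ok := fi.Sys().(*TOCEntry); ok {
+//		_ = ent.ChunkDigest // full TOC metadata is available here
+//	}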
+type fileInfo struct{ e *TOCEntry }
+
+var _ os.FileInfo = fileInfo{}
+
+func (fi fileInfo) Name() string       { return path.Base(fi.e.Name) }
+func (fi fileInfo) IsDir() bool        { return fi.e.Type == "dir" }
+func (fi fileInfo) Size() int64        { return fi.e.Size }
+func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
+func (fi fileInfo) Sys() interface{}   { return fi.e }
+func (fi fileInfo) Mode() (m os.FileMode) {
+	// TOCEntry.Mode is tar.Header.Mode so we can understand these bits using the `tar` pkg.
+	m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
+		(os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
+	switch fi.e.Type {
+	case "dir":
+		m |= os.ModeDir
+	case "symlink":
+		m |= os.ModeSymlink
+	case "char":
+		m |= os.ModeDevice | os.ModeCharDevice
+	case "block":
+		m |= os.ModeDevice
+	case "fifo":
+		m |= os.ModeNamedPipe
+	}
+	return m
+}
+
+// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
+// in an eStargz blob.
+type TOCEntryVerifier interface {
+
+	// Verifier provides a content verifier that can be used for verifying the
+	// contents of the specified TOCEntry.
+	Verifier(ce *TOCEntry) (digest.Verifier, error)
+}
+
+// Compression provides the compression helper to be used when creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+	Compressor
+	Decompressor
+}
+
+// Compressor represents the helper methods to be used for creating eStargz.
+type Compressor interface {
+	// Writer returns WriteCloser to be used for writing a chunk to eStargz.
+	// Every time a chunk is written, the WriteCloser is closed and Writer is
+	// called again for writing the next chunk.
+	Writer(w io.Writer) (io.WriteCloser, error)
+
+	// WriteTOCAndFooter is called to write JTOC to the passed Writer.
+	// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
+	// WriteTOCAndFooter can optionally write anything that affects DiffID calculation
+	// (e.g. uncompressed TOC JSON).
+	//
+	// This function returns tocDgst that represents the digest of TOC that will be used
+	// to verify this blob when it's parsed.
+	WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
+
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+	// Reader returns ReadCloser to be used for decompressing file payload.
+	Reader(r io.Reader) (io.ReadCloser, error)
+
+	// FooterSize returns the size of the footer of this blob.
+	FooterSize() int64
+
+	// ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
+	// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
+	// the top until the TOC JSON).
+	//
+	// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
+	// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
+	ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+	// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
+	// of the underlying blob that has the range specified by ParseFooter method.
+	//
+	// This function returns tocDgst that represents the digest of TOC that will be used
+	// to verify this blob. 
This must match to the value returned from + // Compressor.WriteTOCAndFooter that is used when creating this blob. + ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) +} diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS new file mode 100644 index 00000000..8990f85b --- /dev/null +++ b/vendor/github.com/docker/cli/AUTHORS @@ -0,0 +1,771 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `scripts/docs/generate-authors.sh`. + +Aanand Prasad +Aaron L. Xu +Aaron Lehmann +Aaron.L.Xu +Abdur Rehman +Abhinandan Prativadi +Abin Shahab +Abreto FU +Ace Tang +Addam Hardy +Adolfo Ochagavía +Adrian Plata +Adrien Duermael +Adrien Folie +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Akhil Mohan +Akihiro Suda +Akim Demaille +Alan Thompson +Albert Callarisa +Albin Kerouanton +Aleksa Sarai +Aleksander Piotrowski +Alessandro Boch +Alex Mavrogiannis +Alex Mayer +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Ryabov +Alexandre González +Alfred Landrum +Alicia Lauerman +Allen Sun +Alvin Deng +Amen Belayneh +Amir Goldstein +Amit Krishnan +Amit Shukla +Amy Lindburg +Anca Iordache +Anda Xu +Andrea Luzzardi +Andreas Köhler +Andrew France +Andrew Hsu +Andrew Macpherson +Andrew McDonnell +Andrew Po +Andrey Petrov +Andrii Berehuliak +André Martins +Andy Goldstein +Andy Rothfusz +Anil Madhavapeddy +Ankush Agarwal +Anne Henmi +Anton Polonskiy +Antonio Murdaca +Antonis Kalipetis +Anusha Ragunathan +Ao Li +Arash Deshmeh +Arko Dasgupta +Arnaud Porterie +Arthur Peka +Ashwini Oruganti +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Bastiaan Bakker +BastianHofmann +Ben Bonnefoy +Ben Creasy +Ben Firshman +Benjamin Boudreau +Benoit Sigoure +Bhumika Bayani +Bill Wang +Bin Liu +Bingshen Wang +Boaz Shuster +Bogdan Anton +Boris Pruessmann +Bradley Cicenas +Brandon Mitchell +Brandon Philips +Brent Salisbury +Bret Fisher +Brian (bex) Exelbierd +Brian Goff +Brian Wieder +Bryan Bess +Bryan Boreham +Bryan Murphy +bryfry +Cameron Spear +Cao Weiwei +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Faragher +Chao Wang +Charles Chan +Charles Law +Charles Smith +Charlie Drage +ChaYoung You +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +Chen Qiu +Chris Gavin +Chris Gibson +Chris McKinnel +Chris Snow +Chris Weyl +Christian Persson +Christian Stefanescu +Christophe Robin +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Jones +Christy Norman +Chun Chen +Clinton Kitson +Coenraad Loubser +Colin Hebert +Collin Guarino +Colm Hally +Comical Derskeal <27731088+derskeal@users.noreply.github.com> +Corey Farrell +Corey Quon +Craig Wilhite +Cristian Staretu +Daehyeok Mun +Dafydd Crosby +Daisuke Ito +dalanlan +Damien Nadé +Dan Cotora +Daniel Artine +Daniel Cassidy +Daniel Dao +Daniel Farrell +Daniel Gasienica +Daniel Goosen +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Nephin +Daniel Norberg +Daniel Watkins +Daniel Zhang +Daniil Nikolenko +Danny Berger +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David Beitey +David Calavera +David Cramer +David Dooling +David Gageot +David Lechner +David Scott +David Sheets +David Williamson +David Xia +David Young +Deng Guangxing +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek McGowan +Deshi Xiao +Dharmit Shah +Dhawal Yogesh Bhanushali +Dieter Reuter +Dima Stopel +Dimitry Andric +Ding Fei +Diogo Monica 
+Djordje Lukic +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Dominik Braun +Don Kjer +Dong Chen +Doug Davis +Drew Erny +Ed Costello +Elango Sivanandam +Eli Uriegas +Eli Uriegas +Elias Faxö +Elliot Luo <956941328@qq.com> +Eric Curtin +Eric G. Noriega +Eric Rosenberg +Eric Sage +Eric-Olivier Lamey +Erica Windisch +Erik Hollensbe +Erik St. Martin +Essam A. Hassan +Ethan Haynes +Euan Kemp +Eugene Yakubovich +Evan Allrich +Evan Hazlett +Evan Krall +Evelyn Xu +Everett Toews +Fabio Falci +Fabrizio Soppelsa +Felix Hupfeld +Felix Rabe +Filip Jareš +Flavio Crisciani +Florian Klein +Forest Johnson +Foysal Iqbal +François Scala +Fred Lifton +Frederic Hemberger +Frederick F. Kautz IV +Frederik Nordahl Jul Sabroe +Frieder Bluemle +Gabriel Nicolas Avellaneda +Gaetan de Villele +Gang Qiao +Gary Schaetz +Genki Takiuchi +George MacRorie +George Xie +Gianluca Borello +Gildas Cuisinier +Goksu Toprak +Gou Rao +Grant Reaber +Greg Pflaum +Guilhem Lettron +Guillaume J. Charmes +Guillaume Le Floch +gwx296173 +Günther Jungbluth +Hakan Özler +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harold Cooper +Harry Zhang +He Simei +Hector S +Helen Xie +Henning Sprang +Henry N +Hernan Garcia +Hongbin Lu +Hu Keping +Huayi Zhang +Hugo Gabriel Eyherabide +huqun +Huu Nguyen +Hyzhou Zhy +Ian Campbell +Ian Philpot +Ignacio Capurro +Ilya Dmitrichenko +Ilya Khlopotov +Ilya Sotkov +Ioan Eugen Stan +Isabel Jimenez +Ivan Grcic +Ivan Markin +Jacob Atzen +Jacob Tomlinson +Jaivish Kothari +Jake Lambert +Jake Sanders +James Nesbitt +James Turnbull +Jamie Hannaford +Jan Koprowski +Jan Pazdziora +Jan-Jaap Driessen +Jana Radhakrishnan +Jared Hocutt +Jasmine Hegman +Jason Heiss +Jason Plum +Jay Kamat +Jean Rouge +Jean-Christophe Sirot +Jean-Pierre Huynh +Jeff Lindsay +Jeff Nickoloff +Jeff Silberman +Jeremy Chambers +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jesse Adametz +Jessica Frazelle +Jezeniel Zapanta +Jian Zhang +Jie Luo +Jilles Oldenbeuving +Jim Galasyn +Jimmy Leger +Jimmy Song +jimmyxian +Jintao Zhang +Joao Fernandes +Joe Abbey +Joe Doliner +Joe Gordon +Joel Handwell +Joey Geiger +Joffrey F +Johan Euphrosine +Johannes 'fish' Ziemke +John Feminella +John Harris +John Howard +John Laswell +John Maguire +John Mulhausen +John Starks +John Stephens +John Tims +John V. Martinez +John Willis +Jon Johnson +Jonatas Baldin +Jonathan Boulle +Jonathan Lee +Jonathan Lomas +Jonathan McCrohan +Jonh Wendell +Jordan Jennings +Jose J. 
Escobar <53836904+jescobar-docker@users.noreply.github.com> +Joseph Kern +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Soref +Julien Barbier +Julien Kassar +Julien Maitrehenry +Justas Brazauskas +Justin Cormack +Justin Simonelis +Justyn Temme +Jyrki Puttonen +Jérémie Drouet +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu (Kennan) +Kara Alexandra +Kareem Khazem +Karthik Nayak +Kat Samperi +Kathryn Spiers +Katie McLaughlin +Ke Xu +Kei Ohmura +Keith Hudgins +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kevin Burke +Kevin Feyrer +Kevin Kern +Kevin Kirsche +Kevin Meredith +Kevin Richardson +Kevin Woblick +khaled souf +Kim Eik +Kir Kolyshkin +Kotaro Yoshimatsu +Krasi Georgiev +Kris-Mikael Krister +Kun Zhang +Kunal Kushwaha +Lachlan Cooper +Lai Jiangshan +Lars Kellogg-Stedman +Laura Frank +Laurent Erignoux +Lee Gaines +Lei Jitang +Lennie +Leo Gallucci +Lewis Daly +Li Yi +Li Yi +Liang-Chi Hsieh +Lifubang +Lihua Tang +Lily Guo +Lin Lu +Linus Heckemann +Liping Xue +Liron Levin +liwenqi +lixiaobing10051267 +Lloyd Dewolf +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Lucas Chan +Luka Hartwig +Lukas Heeren +Lukasz Zajaczkowski +Lydell Manganti +Lénaïc Huard +Ma Shimiao +Mabin +Maciej Kalisz +Madhav Puri +Madhu Venugopal +Madhur Batra +Malte Janduda +Manjunath A Kumatagi +Mansi Nahar +mapk0y +Marc Bihlmaier +Marco Mariani +Marco Vedovati +Marcus Martins +Marianna Tessel +Marius Ileana +Marius Sturm +Mark Oates +Marsh Macy +Martin Mosegaard Amdisen +Mary Anthony +Mason Fish +Mason Malone +Mateusz Major +Mathieu Champlon +Matt Gucci +Matt Robenolt +Matteo Orefice +Matthew Heon +Matthieu Hauglustaine +Mauro Porras P +Max Shytikov +Maxime Petazzoni +Mei ChunTao +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Crosby +Michael Friis +Michael Irwin +Michael Käufl +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Steinert +Michael West +Michal Minář +Michał Czeraszkiewicz +Miguel Angel Alvarez Cabrerizo +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Danese +Mike Dillon +Mike Goelzer +Mike MacCana +mikelinjie <294893458@qq.com> +Mikhail Vasin +Milind Chawre +Mindaugas Rukas +Miroslav Gula +Misty Stanley-Jones +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohini Anne Dsouza +Moorthy RS +Morgan Bauer +Morten Hekkvang +Moysés Borges +Mrunal Patel +muicoder +Muthukumar R +Máximo Cuadros +Mårten Cassel +Nace Oroz +Nahum Shalman +Nalin Dahyabhai +Nao YONASHIRO +Nassim 'Nass' Eddequiouaq +Natalie Parker +Nate Brennand +Nathan Hsieh +Nathan LeClaire +Nathan McCauley +Neil Peterson +Nick Adcock +Nico Stapelbroek +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nikhil Chawla +Nikolas Garofil +Nikolay Milovanov +Nir Soffer +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +O.S. 
Tezer +Odin Ugedal +ohmystack +Olle Jonsson +Olli Janatuinen +Oscar Wieman +Otto Kekäläinen +Ovidio Mallo +Pascal Borreli +Patrick Böänziger +Patrick Hemmer +Patrick Lang +Paul +Paul Kehrer +Paul Lietar +Paul Mulders +Paul Weaver +Pavel Pospisil +Paweł Szczekutowicz +Peeyush Gupta +Per Lundberg +Peter Edge +Peter Hsu +Peter Jaffe +Peter Kehl +Peter Nagy +Peter Salvatore +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +Philipp Schmied +pidster +pixelistik +Pratik Karki +Prayag Verma +Preston Cowley +Pure White +Qiang Huang +Qinglan Peng +qudongfang +Raghavendra K T +Rahul Zoldyck +Ravi Shekhar Jethani +Ray Tsang +Reficul +Remy Suen +Renaud Gaubert +Ricardo N Feliciano +Rich Moyse +Richard Mathie +Richard Scothern +Rick Wieman +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Gulewich +Robert Wallis +Robin Naundorf +Robin Speekenbrink +Rodolfo Ortiz +Rogelio Canedo +Rohan Verma +Roland Kammerer +Roman Dudin +Rory Hunter +Ross Boucher +Rubens Figueiredo +Rui Cao +Ryan Belgrave +Ryan Detzel +Ryan Stelly +Ryan Wilson-Perkin +Ryan Zhang +Sainath Grandhi +Sakeven Jiang +Sally O'Malley +Sam Neirinck +Samarth Shah +Sambuddha Basu +Sami Tabet +Samuel Cochran +Samuel Karp +Santhosh Manohar +Sargun Dhillon +Saswat Bhattacharya +Scott Brenner +Scott Collier +Sean Christopherson +Sean Rodman +Sebastiaan van Stijn +Sergey Tryuber +Serhat Gülçiçek +Sevki Hasirci +Shaun Kaasten +Sheng Yang +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shukui Yang +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silvin Lubecki +Simei He +Simon Ferquel +Simon Heimberg +Sindhu S +Slava Semushin +Solomon Hykes +Song Gao +Spencer Brown +squeegels <1674195+squeegels@users.noreply.github.com> +Srini Brahmaroutu +Stefan S. +Stefan Scherer +Stefan Weil +Stephane Jeandeaux +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Richards +Steven Burgess +Subhajit Ghosh +Sun Jianbo +Sune Keller +Sungwon Han +Sunny Gogoi +Sven Dowideit +Sylvain Baubeau +Sébastien HOUZÉ +T K Sourabh +TAGOMORI Satoshi +taiji-tech +Taylor Jones +Tejaswini Duggaraju +Tengfei Wang +Teppei Fukuda +Thatcher Peskens +Thibault Coupin +Thomas Gazagnaire +Thomas Krzero +Thomas Leonard +Thomas Léveil +Thomas Riccardi +Thomas Swift +Tianon Gravi +Tianyi Wang +Tibor Vass +Tim Dettrick +Tim Hockin +Tim Sampson +Tim Smith +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +Tobias Bradtke +Tobias Gesellchen +Todd Whiteman +Tom Denham +Tom Fotherby +Tom Klingenberg +Tom Milligan +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomáš Hrčka +Tony Abboud +Tõnis Tiigi +Trapier Marshall +Travis Cline +Tristan Carel +Tycho Andersen +Tycho Andersen +uhayate +Ulrich Bareth +Ulysses Souza +Umesh Yadav +Valentin Lorentz +Venkateswara Reddy Bukkasamudram +Veres Lajos +Victor Vieux +Victoria Bialas +Viktor Stanchev +Vimal Raghubir +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Woo +Vishnu Kannan +Vivek Goyal +Wang Jie +Wang Lei +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +Wataru Ishida +Wayne Song +Wen Cheng Ma +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +William Henry +Xianglin Gao +Xiaodong Liu +Xiaodong Zhang +Xiaoxi He +Xinbo Weng +Xuecong Liao +Yan Feng +Yanqiang Miao +Yassine Tijani +Yi EungJun +Ying Li +Yong Tang +Yosef Fertel +Yu Peng +Yuan Sun +Yue Zhang +Yunxiang Huang +Zachary Romero +Zander Mackie +zebrilee +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhenghenghuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Álex González +Álvaro Lázaro +Átila Camurça Alves +徐俊杰 diff --git a/vendor/github.com/docker/cli/LICENSE b/vendor/github.com/docker/cli/LICENSE new file mode 100644 index 00000000..9c8e20ab --- /dev/null +++ b/vendor/github.com/docker/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/cli/NOTICE b/vendor/github.com/docker/cli/NOTICE new file mode 100644 index 00000000..58b19b6d --- /dev/null +++ b/vendor/github.com/docker/cli/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
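
The files below vendor Docker's client configuration package (github.com/docker/cli/cli/config), which resolves the config directory from $DOCKER_CONFIG (falling back to ~/.docker), parses config.json, and decodes the base64 "auth" entries into usernames and passwords. As a minimal sketch of how a consumer of this repo might use the vendored API — the registry hostname here is only illustrative:

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/cli/cli/config"
    )

    func main() {
        // Load ~/.docker/config.json (or $DOCKER_CONFIG/config.json); on a
        // load error this still returns a usable, empty ConfigFile and only
        // prints a warning to the supplied writer.
        cfg := config.LoadDefaultConfigFile(os.Stderr)

        // Resolve credentials for one registry. Per the vendored code below,
        // this consults a registry-specific credHelper first, then the
        // configured credsStore, then the plain-text auths map in the file.
        auth, err := cfg.GetAuthConfig("index.docker.io")
        if err != nil {
            fmt.Fprintln(os.Stderr, "credential lookup failed:", err)
            os.Exit(1)
        }
        fmt.Println("username:", auth.Username)
    }
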
diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go new file mode 100644 index 00000000..31ad117d --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -0,0 +1,167 @@ +package config + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker/pkg/homedir" + "github.com/pkg/errors" +) + +const ( + // ConfigFileName is the name of config file + ConfigFileName = "config.json" + configFileDir = ".docker" + oldConfigfile = ".dockercfg" + contextsDir = "contexts" +) + +var ( + initConfigDir = new(sync.Once) + configDir string + homeDir string +) + +// resetHomeDir is used in testing to reset the "homeDir" package variable to +// force re-lookup of the home directory between tests. +func resetHomeDir() { + homeDir = "" +} + +func getHomeDir() string { + if homeDir == "" { + homeDir = homedir.Get() + } + return homeDir +} + +// resetConfigDir is used in testing to reset the "configDir" package variable +// and its sync.Once to force re-lookup between tests. +func resetConfigDir() { + configDir = "" + initConfigDir = new(sync.Once) +} + +func setConfigDir() { + if configDir != "" { + return + } + configDir = os.Getenv("DOCKER_CONFIG") + if configDir == "" { + configDir = filepath.Join(getHomeDir(), configFileDir) + } +} + +// Dir returns the directory the configuration file is stored in +func Dir() string { + initConfigDir.Do(setConfigDir) + return configDir +} + +// ContextStoreDir returns the directory the docker contexts are stored in +func ContextStoreDir() string { + return filepath.Join(Dir(), contextsDir) +} + +// SetDir sets the directory the configuration file is stored in +func SetDir(dir string) { + configDir = filepath.Clean(dir) +} + +// Path returns the path to a file relative to the config dir +func Path(p ...string) (string, error) { + path := filepath.Join(append([]string{Dir()}, p...)...) + if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { + return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir()) + } + return path, nil +} + +// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from +// a non-nested reader +func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LegacyLoadFromReader(configData) + return &configFile, err +} + +// LoadFromReader is a convenience function that creates a ConfigFile object from +// a reader +func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LoadFromReader(configData) + return &configFile, err +} + +// Load reads the configuration files in the given directory, and sets up +// the auth config information and returns values. 
+// FIXME: use the internal golang config parser +func Load(configDir string) (*configfile.ConfigFile, error) { + cfg, _, err := load(configDir) + return cfg, err +} + +// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file +// so we can remove the bool return value and collapse this back into `Load` +func load(configDir string) (*configfile.ConfigFile, bool, error) { + printLegacyFileWarning := false + + if configDir == "" { + configDir = Dir() + } + + filename := filepath.Join(configDir, ConfigFileName) + configFile := configfile.New(filename) + + // Try happy path first - latest config file + if file, err := os.Open(filename); err == nil { + defer file.Close() + err = configFile.LoadFromReader(file) + if err != nil { + err = errors.Wrap(err, filename) + } + return configFile, printLegacyFileWarning, err + } else if !os.IsNotExist(err) { + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return configFile, printLegacyFileWarning, errors.Wrap(err, filename) + } + + // Can't find latest config file so check for the old one + filename = filepath.Join(getHomeDir(), oldConfigfile) + if file, err := os.Open(filename); err == nil { + printLegacyFileWarning = true + defer file.Close() + if err := configFile.LegacyLoadFromReader(file); err != nil { + return configFile, printLegacyFileWarning, errors.Wrap(err, filename) + } + } + return configFile, printLegacyFileWarning, nil +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// an initialized ConfigFile struct if none is found. +func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { + configFile, printLegacyFileWarning, err := load(Dir()) + if err != nil { + fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) + } + if printLegacyFileWarning { + _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release") + } + if !configFile.ContainsAuth() { + configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) + } + return configFile +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go new file mode 100644 index 00000000..d6f71081 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -0,0 +1,415 @@ +package configfile + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // This constant is only used for really old config files when the + // URL wasn't saved as part of the config file and it was just + // assumed to be this value. 
+ defaultIndexServer = "https://index.docker.io/v1/" +) + +// ConfigFile ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + NetworksFormat string `json:"networksFormat,omitempty"` + PluginsFormat string `json:"pluginsFormat,omitempty"` + VolumesFormat string `json:"volumesFormat,omitempty"` + StatsFormat string `json:"statsFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Filename string `json:"-"` // Note: for internal use only + ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` + ServicesFormat string `json:"servicesFormat,omitempty"` + TasksFormat string `json:"tasksFormat,omitempty"` + SecretFormat string `json:"secretFormat,omitempty"` + ConfigFormat string `json:"configFormat,omitempty"` + NodesFormat string `json:"nodesFormat,omitempty"` + PruneFilters []string `json:"pruneFilters,omitempty"` + Proxies map[string]ProxyConfig `json:"proxies,omitempty"` + Experimental string `json:"experimental,omitempty"` + StackOrchestrator string `json:"stackOrchestrator,omitempty"` + Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` + CurrentContext string `json:"currentContext,omitempty"` + CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` + Plugins map[string]map[string]string `json:"plugins,omitempty"` + Aliases map[string]string `json:"aliases,omitempty"` +} + +// ProxyConfig contains proxy configuration settings +type ProxyConfig struct { + HTTPProxy string `json:"httpProxy,omitempty"` + HTTPSProxy string `json:"httpsProxy,omitempty"` + NoProxy string `json:"noProxy,omitempty"` + FTPProxy string `json:"ftpProxy,omitempty"` +} + +// KubernetesConfig contains Kubernetes orchestrator settings +type KubernetesConfig struct { + AllNamespaces string `json:"allNamespaces,omitempty"` +} + +// New initializes an empty configuration file for the given filename 'fn' +func New(fn string) *ConfigFile { + return &ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + HTTPHeaders: make(map[string]string), + Filename: fn, + Plugins: make(map[string]map[string]string), + Aliases: make(map[string]string), + } +} + +// LegacyLoadFromReader reads the non-nested configuration data given and sets up the +// auth config information with given directory and populates the receiver object +func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { + b, err := ioutil.ReadAll(configData) + if err != nil { + return err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return errors.Errorf("The Auth config file is empty") + } + authConfig := types.AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return errors.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) + if err != nil { + return err + } + authConfig.ServerAddress = defaultIndexServer + configFile.AuthConfigs[defaultIndexServer] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) + if err != nil { + return err + } + authConfig.Auth = "" + 
authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + return nil +} + +// LoadFromReader reads the configuration data given and sets up the auth config +// information with given directory and populates the receiver object +func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { + if err := json.NewDecoder(configData).Decode(configFile); err != nil && !errors.Is(err, io.EOF) { + return err + } + var err error + for addr, ac := range configFile.AuthConfigs { + if ac.Auth != "" { + ac.Username, ac.Password, err = decodeAuth(ac.Auth) + if err != nil { + return err + } + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + return checkKubernetesConfiguration(configFile.Kubernetes) +} + +// ContainsAuth returns whether there is authentication configured +// in this file or not. +func (configFile *ConfigFile) ContainsAuth() bool { + return configFile.CredentialsStore != "" || + len(configFile.CredentialHelpers) > 0 || + len(configFile.AuthConfigs) > 0 +} + +// GetAuthConfigs returns the mapping of repo to auth configuration +func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig { + return configFile.AuthConfigs +} + +// SaveToWriter encodes and writes out all the authorization information to +// the given writer +func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + // User-Agent header is automatically set, and should not be stored in the configuration + for v := range configFile.HTTPHeaders { + if strings.EqualFold(v, "User-Agent") { + delete(configFile.HTTPHeaders, v) + } + } + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() (retErr error) { + if configFile.Filename == "" { + return errors.Errorf("Can't save config with empty filename") + } + + dir := filepath.Dir(configFile.Filename) + if err := os.MkdirAll(dir, 0700); err != nil { + return err + } + temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename)) + if err != nil { + return err + } + defer func() { + temp.Close() + if retErr != nil { + if err := os.Remove(temp.Name()); err != nil { + logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file") + } + } + }() + + err = configFile.SaveToWriter(temp) + if err != nil { + return err + } + + if err := temp.Close(); err != nil { + return errors.Wrap(err, "error closing temp file") + } + + // Handle situation where the configfile is a symlink + cfgFile := configFile.Filename + if f, err := os.Readlink(cfgFile); err == nil { + cfgFile = f + } + + // Try copying the current config file (if any) ownership and permissions + copyFilePermissions(cfgFile, temp.Name()) + return os.Rename(temp.Name(), cfgFile) +} + +// ParseProxyConfig computes proxy configuration by 
retrieving the config for the provided host and +// then checking this against any environment variables provided to the container +func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string { + var cfgKey string + + if _, ok := configFile.Proxies[host]; !ok { + cfgKey = "default" + } else { + cfgKey = host + } + + config := configFile.Proxies[cfgKey] + permitted := map[string]*string{ + "HTTP_PROXY": &config.HTTPProxy, + "HTTPS_PROXY": &config.HTTPSProxy, + "NO_PROXY": &config.NoProxy, + "FTP_PROXY": &config.FTPProxy, + } + m := runOpts + if m == nil { + m = make(map[string]*string) + } + for k := range permitted { + if *permitted[k] == "" { + continue + } + if _, ok := m[k]; !ok { + m[k] = permitted[k] + } + if _, ok := m[strings.ToLower(k)]; !ok { + m[strings.ToLower(k)] = permitted[k] + } + } + return m +} + +// encodeAuth creates a base64 encoded string to containing authorization information +func encodeAuth(authConfig *types.AuthConfig) string { + if authConfig.Username == "" && authConfig.Password == "" { + return "" + } + + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", errors.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", errors.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} + +// GetCredentialsStore returns a new credentials store from the settings in the +// configuration file +func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { + if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { + return newNativeStore(configFile, helper) + } + return credentials.NewFileStore(configFile) +} + +// var for unit testing. +var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store { + return credentials.NewNativeStore(configFile, helperSuffix) +} + +// GetAuthConfig for a repository from the credential store +func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) { + return configFile.GetCredentialsStore(registryHostname).Get(registryHostname) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string { + if c.CredentialHelpers != nil && registryHostname != "" { + if helper, exists := c.CredentialHelpers[registryHostname]; exists { + return helper + } + } + return c.CredentialsStore +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores. 
+func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + addAll := func(from map[string]types.AuthConfig) { + for reg, ac := range from { + auths[reg] = ac + } + } + + defaultStore := configFile.GetCredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(newAuths) + + // Auth configs from a registry-specific helper should override those from the default store. + for registryHostname := range configFile.CredentialHelpers { + newAuth, err := configFile.GetAuthConfig(registryHostname) + if err != nil { + return nil, err + } + auths[registryHostname] = newAuth + } + return auths, nil +} + +// GetFilename returns the file name that this config file is based on. +func (configFile *ConfigFile) GetFilename() string { + return configFile.Filename +} + +// PluginConfig retrieves the requested option for the given plugin. +func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) { + if configFile.Plugins == nil { + return "", false + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + return "", false + } + value, ok := pluginConfig[option] + return value, ok +} + +// SetPluginConfig sets the option to the given value for the given +// plugin. Passing a value of "" will remove the option. If removing +// the final config item for a given plugin then also cleans up the +// overall plugin entry. +func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) { + if configFile.Plugins == nil { + configFile.Plugins = make(map[string]map[string]string) + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + pluginConfig = make(map[string]string) + configFile.Plugins[pluginname] = pluginConfig + } + if value != "" { + pluginConfig[option] = value + } else { + delete(pluginConfig, option) + } + if len(pluginConfig) == 0 { + delete(configFile.Plugins, pluginname) + } +} + +func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { + if kubeConfig == nil { + return nil + } + switch kubeConfig.AllNamespaces { + case "": + case "enabled": + case "disabled": + default: + return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go new file mode 100644 index 00000000..6af67181 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go @@ -0,0 +1,36 @@ +//go:build !windows +// +build !windows + +package configfile + +import ( + "os" + "syscall" +) + +// copyFilePermissions copies file ownership and permissions from "src" to "dst", +// ignoring any error during the process. 
+func copyFilePermissions(src, dst string) { + var ( + mode os.FileMode = 0600 + uid, gid int + ) + + fi, err := os.Stat(src) + if err != nil { + return + } + if fi.Mode().IsRegular() { + mode = fi.Mode() + } + if err := os.Chmod(dst, mode); err != nil { + return + } + + uid = int(fi.Sys().(*syscall.Stat_t).Uid) + gid = int(fi.Sys().(*syscall.Stat_t).Gid) + + if uid > 0 && gid > 0 { + _ = os.Chown(dst, uid, gid) + } +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go new file mode 100644 index 00000000..42fffc39 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go @@ -0,0 +1,5 @@ +package configfile + +func copyFilePermissions(src, dst string) { + // TODO implement for Windows +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go new file mode 100644 index 00000000..28d58ec4 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. + Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. + GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go new file mode 100644 index 00000000..402235bf --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go @@ -0,0 +1,21 @@ +package credentials + +import ( + exec "golang.org/x/sys/execabs" +) + +// DetectDefaultStore return the default credentials store for the platform if +// the store executable is available. 
+func DetectDefaultStore(store string) string { + platformDefault := defaultCredentialsStore() + + // user defined or no default for platform + if store != "" || platformDefault == "" { + return store + } + + if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { + return platformDefault + } + return "" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go new file mode 100644 index 00000000..5d42dec6 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "osxkeychain" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go new file mode 100644 index 00000000..a9012c6d --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go @@ -0,0 +1,13 @@ +package credentials + +import ( + "os/exec" +) + +func defaultCredentialsStore() string { + if _, err := exec.LookPath("pass"); err == nil { + return "pass" + } + + return "secretservice" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go new file mode 100644 index 00000000..c9630ea5 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go @@ -0,0 +1,8 @@ +//go:build !windows && !darwin && !linux +// +build !windows,!darwin,!linux + +package credentials + +func defaultCredentialsStore() string { + return "" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go new file mode 100644 index 00000000..bb799ca6 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "wincred" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go new file mode 100644 index 00000000..e509820b --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -0,0 +1,81 @@ +package credentials + +import ( + "strings" + + "github.com/docker/cli/cli/config/types" +) + +type store interface { + Save() error + GetAuthConfigs() map[string]types.AuthConfig + GetFilename() string +} + +// fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text. +type fileStore struct { + file store +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file store) Store { + return &fileStore{file: file} +} + +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { + delete(c.file.GetAuthConfigs(), serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. 
+func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { + authConfig, ok := c.file.GetAuthConfigs()[serverAddress] + if !ok { + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for r, ac := range c.file.GetAuthConfigs() { + if serverAddress == ConvertToHostname(r) { + return ac, nil + } + } + + authConfig = types.AuthConfig{} + } + return authConfig, nil +} + +func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { + return c.file.GetAuthConfigs(), nil +} + +// Store saves the given credentials in the file store. +func (c *fileStore) Store(authConfig types.AuthConfig) error { + c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig + return c.file.Save() +} + +func (c *fileStore) GetFilename() string { + return c.file.GetFilename() +} + +func (c *fileStore) IsFileStore() bool { + return true +} + +// ConvertToHostname converts a registry url which has http|https prepended +// to just an hostname. +// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies. +func ConvertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go new file mode 100644 index 00000000..f9619b03 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go @@ -0,0 +1,143 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" //nolint:gosec // ignore G101: Potential hardcoded credentials + tokenUsername = "" +) + +// nativeStore implements a credentials store +// using native keychain to keep credentials secure. +// It piggybacks into a file store to keep users' emails. +type nativeStore struct { + programFunc client.ProgramFunc + fileStore Store +} + +// NewNativeStore creates a new native store that +// uses a remote helper program to manage credentials. +func NewNativeStore(file store, helperSuffix string) Store { + name := remoteCredentialsPrefix + helperSuffix + return &nativeStore{ + programFunc: client.NewShellProgramFunc(name), + fileStore: NewFileStore(file), + } +} + +// Erase removes the given credentials from the native store. +func (c *nativeStore) Erase(serverAddress string) error { + if err := client.Erase(c.programFunc, serverAddress); err != nil { + return err + } + + // Fallback to plain text store to remove email + return c.fileStore.Erase(serverAddress) +} + +// Get retrieves credentials for a specific server from the native store. +func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { + // load user email if it exist or an empty auth config. + auth, _ := c.fileStore.Get(serverAddress) + + creds, err := c.getCredentialsFromStore(serverAddress) + if err != nil { + return auth, err + } + auth.Username = creds.Username + auth.IdentityToken = creds.IdentityToken + auth.Password = creds.Password + + return auth, nil +} + +// GetAll retrieves all the credentials from the native store. 
+func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { + auths, err := c.listCredentialsInStore() + if err != nil { + return nil, err + } + + // Emails are only stored in the file store. + // This call can be safely eliminated when emails are removed. + fileConfigs, _ := c.fileStore.GetAll() + + authConfigs := make(map[string]types.AuthConfig) + for registry := range auths { + creds, err := c.getCredentialsFromStore(registry) + if err != nil { + return nil, err + } + ac := fileConfigs[registry] // might contain Email + ac.Username = creds.Username + ac.Password = creds.Password + ac.IdentityToken = creds.IdentityToken + authConfigs[registry] = ac + } + + return authConfigs, nil +} + +// Store saves the given credentials in the file store. +func (c *nativeStore) Store(authConfig types.AuthConfig) error { + if err := c.storeCredentialsInStore(authConfig); err != nil { + return err + } + authConfig.Username = "" + authConfig.Password = "" + authConfig.IdentityToken = "" + + // Fallback to old credential in plain text to save only the email + return c.fileStore.Store(authConfig) +} + +// storeCredentialsInStore executes the command to store the credentials in the native store. +func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { + creds := &credentials.Credentials{ + ServerURL: config.ServerAddress, + Username: config.Username, + Secret: config.Password, + } + + if config.IdentityToken != "" { + creds.Username = tokenUsername + creds.Secret = config.IdentityToken + } + + return client.Store(c.programFunc, creds) +} + +// getCredentialsFromStore executes the command to get the credentials from the native store. +func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { + var ret types.AuthConfig + + creds, err := client.Get(c.programFunc, serverAddress) + if err != nil { + if credentials.IsErrCredentialsNotFound(err) { + // do not return an error if the credentials are not + // in the keychain. Let docker ask for new credentials. + return ret, nil + } + return ret, err + } + + if creds.Username == tokenUsername { + ret.IdentityToken = creds.Secret + } else { + ret.Password = creds.Secret + ret.Username = creds.Username + } + + ret.ServerAddress = serverAddress + return ret, nil +} + +// listCredentialsInStore returns a listing of stored credentials as a map of +// URL -> username. +func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { + return client.List(c.programFunc) +} diff --git a/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/vendor/github.com/docker/cli/cli/config/types/authconfig.go new file mode 100644 index 00000000..056af6b8 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. 
+ IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go new file mode 100644 index 00000000..2c3ebe16 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go @@ -0,0 +1,27 @@ +package challenge + +import ( + "net/url" + "strings" +) + +// FROM: https://golang.org/src/net/http/http.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +// FROM: http://golang.org/src/net/http/transport.go +func canonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go new file mode 100644 index 00000000..fe238210 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -0,0 +1,237 @@ +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" +) + +// Challenge carries information from a WWW-Authenticate response header. +// See RFC 2617. +type Challenge struct { + // Scheme is the auth-scheme according to RFC 2617 + Scheme string + + // Parameters are the auth-params according to RFC 2617 + Parameters map[string]string +} + +// Manager manages the challenges for endpoints. +// The challenges are pulled out of HTTP responses. Only +// responses which expect challenges should be added to +// the manager, since a non-unauthorized request will be +// viewed as not requiring challenges. +type Manager interface { + // GetChallenges returns the challenges for the given + // endpoint URL. + GetChallenges(endpoint url.URL) ([]Challenge, error) + + // AddResponse adds the response to the challenge + // manager. The challenges will be parsed out of + // the WWW-Authenicate headers and added to the + // URL which was produced the response. If the + // response was authorized, any challenges for the + // endpoint will be cleared. + AddResponse(resp *http.Response) error +} + +// NewSimpleManager returns an instance of +// Manger which only maps endpoints to challenges +// based on the responses which have been added the +// manager. 
The simple manager will make no attempt to +// perform requests on the endpoints or cache the responses +// to a backend. +func NewSimpleManager() Manager { + return &simpleManager{ + Challenges: make(map[string][]Challenge), + } +} + +type simpleManager struct { + sync.RWMutex + Challenges map[string][]Challenge +} + +func normalizeURL(endpoint *url.URL) { + endpoint.Host = strings.ToLower(endpoint.Host) + endpoint.Host = canonicalAddr(endpoint) +} + +func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { + normalizeURL(&endpoint) + + m.RLock() + defer m.RUnlock() + challenges := m.Challenges[endpoint.String()] + return challenges, nil +} + +func (m *simpleManager) AddResponse(resp *http.Response) error { + challenges := ResponseChallenges(resp) + if resp.Request == nil { + return fmt.Errorf("missing request reference") + } + urlCopy := url.URL{ + Path: resp.Request.URL.Path, + Host: resp.Request.URL.Host, + Scheme: resp.Request.URL.Scheme, + } + normalizeURL(&urlCopy) + + m.Lock() + defer m.Unlock() + m.Challenges[urlCopy.String()] = challenges + return nil +} + +// Octet types from RFC 2616. +type octetType byte + +var octetTypes [256]octetType + +const ( + isToken octetType = 1 << iota + isSpace +) + +func init() { + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t octetType + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) + if strings.ContainsRune(" \t\r\n", rune(c)) { + t |= isSpace + } + if isChar && !isCtl && !isSeparator { + t |= isToken + } + octetTypes[c] = t + } +} + +// ResponseChallenges returns a list of authorization challenges +// for the given http Response. Challenges are only checked if +// the response status code was a 401. +func ResponseChallenges(resp *http.Response) []Challenge { + if resp.StatusCode == http.StatusUnauthorized { + // Parse the WWW-Authenticate Header and store the challenges + // on this endpoint object. 
+ return parseAuthHeader(resp.Header) + } + + return nil +} + +func parseAuthHeader(header http.Header) []Challenge { + challenges := []Challenge{} + for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { + v, p := parseValueAndParams(h) + if v != "" { + challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) + } + } + return challenges +} + +func parseValueAndParams(header string) (value string, params map[string]string) { + params = make(map[string]string) + value, s := expectToken(header) + if value == "" { + return + } + value = strings.ToLower(value) + s = "," + skipSpace(s) + for strings.HasPrefix(s, ",") { + var pkey string + pkey, s = expectToken(skipSpace(s[1:])) + if pkey == "" { + return + } + if !strings.HasPrefix(s, "=") { + return + } + var pvalue string + pvalue, s = expectTokenOrQuoted(s[1:]) + if pvalue == "" { + return + } + pkey = strings.ToLower(pkey) + params[pkey] = pvalue + s = skipSpace(s) + } + return +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpace == 0 { + break + } + } + return s[i:] +} + +func expectToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isToken == 0 { + break + } + } + return s[:i], s[i:] +} + +func expectTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return expectToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/docker/docker-credential-helpers/LICENSE new file mode 100644 index 00000000..1ea555e2 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2016 David Calavera + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
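
The docker-credential-helpers client package vendored next drives external docker-credential-<name> binaries over a small protocol: the action ("store", "get", "erase", "list") is passed as the sole argument, the payload is written to the helper's stdin, and the result comes back as JSON on stdout. A minimal sketch, assuming a pass-backed helper binary named docker-credential-pass is on PATH (the helper name is an assumption, not something this diff configures):

    package main

    import (
        "fmt"

        "github.com/docker/docker-credential-helpers/client"
    )

    func main() {
        // NewShellProgramFunc builds exec.Cmd values for the named helper;
        // each Get/Store/Erase/List call spawns one helper process.
        program := client.NewShellProgramFunc("docker-credential-pass")

        // Runs `docker-credential-pass get` with the server URL on stdin
        // and decodes the {ServerURL, Username, Secret} JSON reply.
        creds, err := client.Get(program, "https://index.docker.io/v1/")
        if err != nil {
            fmt.Println("get failed:", err)
            return
        }
        fmt.Println("username:", creds.Username)
    }
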
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go new file mode 100644 index 00000000..d1d0434c --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go @@ -0,0 +1,121 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/docker-credential-helpers/credentials" +) + +// isValidCredsMessage checks if 'msg' contains invalid credentials error message. +// It returns whether the logs are free of invalid credentials errors and the error if it isn't. +// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername. +func isValidCredsMessage(msg string) error { + if credentials.IsCredentialsMissingServerURLMessage(msg) { + return credentials.NewErrCredentialsMissingServerURL() + } + + if credentials.IsCredentialsMissingUsernameMessage(msg) { + return credentials.NewErrCredentialsMissingUsername() + } + + return nil +} + +// Store uses an external program to save credentials. +func Store(program ProgramFunc, creds *credentials.Credentials) error { + cmd := program("store") + + buffer := new(bytes.Buffer) + if err := json.NewEncoder(buffer).Encode(creds); err != nil { + return err + } + cmd.Input(buffer) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// Get executes an external program to get the credentials from a native store. +func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { + cmd := program("get") + cmd.Input(strings.NewReader(serverURL)) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if credentials.IsErrCredentialsNotFoundMessage(t) { + return nil, credentials.NewErrCredentialsNotFound() + } + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) + } + + resp := &credentials.Credentials{ + ServerURL: serverURL, + } + + if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { + return nil, err + } + + return resp, nil +} + +// Erase executes a program to remove the server credentials from the native store. +func Erase(program ProgramFunc, serverURL string) error { + cmd := program("erase") + cmd.Input(strings.NewReader(serverURL)) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// List executes a program to list server credentials in the native store. 
+func List(program ProgramFunc) (map[string]string, error) { + cmd := program("list") + cmd.Input(strings.NewReader("unused")) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) + } + + var resp map[string]string + if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { + return nil, err + } + + return resp, nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go new file mode 100644 index 00000000..0183c063 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -0,0 +1,57 @@ +package client + +import ( + "fmt" + "io" + "os" + + exec "golang.org/x/sys/execabs" +) + +// Program is an interface to execute external programs. +type Program interface { + Output() ([]byte, error) + Input(in io.Reader) +} + +// ProgramFunc is a type of function that initializes programs based on arguments. +type ProgramFunc func(args ...string) Program + +// NewShellProgramFunc creates programs that are executed in a Shell. +func NewShellProgramFunc(name string) ProgramFunc { + return NewShellProgramFuncWithEnv(name, nil) +} + +// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables +func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { + return func(args ...string) Program { + return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} + } +} + +func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { + programCmd := exec.Command(commandName, args...) + programCmd.Env = os.Environ() + if env != nil { + for k, v := range *env { + programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + } + programCmd.Stderr = os.Stderr + return programCmd +} + +// Shell invokes shell commands to talk with a remote credentials helper. +type Shell struct { + cmd *exec.Cmd +} + +// Output returns responses from the remote credentials helper. +func (s *Shell) Output() ([]byte, error) { + return s.cmd.Output() +} + +// Input sets the input to send to a remote credentials helper. +func (s *Shell) Input(in io.Reader) { + s.cmd.Stdin = in +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go new file mode 100644 index 00000000..91d9d4bb --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go @@ -0,0 +1,186 @@ +package credentials + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strings" +) + +// Credentials holds the information shared between docker and the credentials store. +type Credentials struct { + ServerURL string + Username string + Secret string +} + +// isValid checks the integrity of Credentials object such that no credentials lack +// a server URL or a username. +// It returns whether the credentials are valid and the error if it isn't. 
+// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername.
+func (c *Credentials) isValid() (bool, error) {
+	if len(c.ServerURL) == 0 {
+		return false, NewErrCredentialsMissingServerURL()
+	}
+
+	if len(c.Username) == 0 {
+		return false, NewErrCredentialsMissingUsername()
+	}
+
+	return true, nil
+}
+
+// CredsLabel holds the way Docker credentials should be labeled in credentials
+// stores that allow labelling. The label makes it possible to filter out
+// non-Docker credentials at lookup/search time in the macOS keychain, the
+// Windows credential manager, and Linux libsecret. The default value is
+// "Docker Credentials".
+var CredsLabel = "Docker Credentials"
+
+// SetCredsLabel is a simple setter for CredsLabel.
+func SetCredsLabel(label string) {
+	CredsLabel = label
+}
+
+// Serve initializes the credentials helper and parses the action argument.
+// This function is designed to be called from a command line interface.
+// It uses os.Args[1] as the key for the action.
+// It uses os.Stdin as input and os.Stdout as output.
+// This function terminates the program with os.Exit(1) if there is an error.
+func Serve(helper Helper) {
+	var err error
+	if len(os.Args) != 2 {
+		err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
+	}
+
+	if err == nil {
+		err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
+	}
+
+	if err != nil {
+		fmt.Fprintf(os.Stdout, "%v\n", err)
+		os.Exit(1)
+	}
+}
+
+// HandleCommand uses a helper and a key to run a credential action.
+func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
+	switch key {
+	case "store":
+		return Store(helper, in)
+	case "get":
+		return Get(helper, in, out)
+	case "erase":
+		return Erase(helper, in)
+	case "list":
+		return List(helper, out)
+	case "version":
+		return PrintVersion(out)
+	}
+	return fmt.Errorf("Unknown credential action `%s`", key)
+}
+
+// Store uses a helper and an input reader to save credentials.
+// The reader must contain the JSON serialization of a Credentials struct.
+func Store(helper Helper, reader io.Reader) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	var creds Credentials
+	if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
+		return err
+	}
+
+	if ok, err := creds.isValid(); !ok {
+		return err
+	}
+
+	return helper.Add(&creds)
+}
+
+// Get retrieves the credentials for a given server URL.
+// The reader must contain the server URL to search.
+// The writer is used to write the JSON serialization of the credentials.
+func Get(helper Helper, reader io.Reader, writer io.Writer) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	serverURL := strings.TrimSpace(buffer.String())
+	if len(serverURL) == 0 {
+		return NewErrCredentialsMissingServerURL()
+	}
+
+	username, secret, err := helper.Get(serverURL)
+	if err != nil {
+		return err
+	}
+
+	resp := Credentials{
+		ServerURL: serverURL,
+		Username:  username,
+		Secret:    secret,
+	}
+
+	buffer.Reset()
+	if err := json.NewEncoder(buffer).Encode(resp); err != nil {
+		return err
+	}
+
+	fmt.Fprint(writer, buffer.String())
+	return nil
+}
+
+// Erase removes credentials from the store.
+// The reader must contain the server URL to remove.
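+//
+// Over the CLI protocol, this action would look like the following sketch
+// (the helper name "docker-credential-example" is hypothetical):
+//
+//	printf "https://index.example.com/v1/" | docker-credential-example erase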
+func Erase(helper Helper, reader io.Reader) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + return helper.Delete(serverURL) +} + +// List returns all the serverURLs of keys in +// the OS store as a list of strings +func List(helper Helper, writer io.Writer) error { + accts, err := helper.List() + if err != nil { + return err + } + return json.NewEncoder(writer).Encode(accts) +} + +// PrintVersion outputs the current version. +func PrintVersion(writer io.Writer) error { + fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version) + return nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go new file mode 100644 index 00000000..fe6a5aef --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -0,0 +1,102 @@ +package credentials + +const ( + // ErrCredentialsNotFound standardizes the not found error, so every helper returns + // the same message and docker can handle it properly. + errCredentialsNotFoundMessage = "credentials not found in native keychain" + + // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize + // invalid credentials or credentials management operations + errCredentialsMissingServerURLMessage = "no credentials server URL" + errCredentialsMissingUsernameMessage = "no credentials username" +) + +// errCredentialsNotFound represents an error +// raised when credentials are not in the store. +type errCredentialsNotFound struct{} + +// Error returns the standard error message +// for when the credentials are not in the store. +func (errCredentialsNotFound) Error() string { + return errCredentialsNotFoundMessage +} + +// NewErrCredentialsNotFound creates a new error +// for when the credentials are not in the store. +func NewErrCredentialsNotFound() error { + return errCredentialsNotFound{} +} + +// IsErrCredentialsNotFound returns true if the error +// was caused by not having a set of credentials in a store. +func IsErrCredentialsNotFound(err error) bool { + _, ok := err.(errCredentialsNotFound) + return ok +} + +// IsErrCredentialsNotFoundMessage returns true if the error +// was caused by not having a set of credentials in a store. +// +// This function helps to check messages returned by an +// external program via its standard output. +func IsErrCredentialsNotFoundMessage(err string) bool { + return err == errCredentialsNotFoundMessage +} + +// errCredentialsMissingServerURL represents an error raised +// when the credentials object has no server URL or when no +// server URL is provided to a credentials operation requiring +// one. +type errCredentialsMissingServerURL struct{} + +func (errCredentialsMissingServerURL) Error() string { + return errCredentialsMissingServerURLMessage +} + +// errCredentialsMissingUsername represents an error raised +// when the credentials object has no username or when no +// username is provided to a credentials operation requiring +// one. 
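+//
+// Helpers print its message verbatim on stdout; callers match it across the
+// process boundary via IsCredentialsMissingUsernameMessage.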
+type errCredentialsMissingUsername struct{} + +func (errCredentialsMissingUsername) Error() string { + return errCredentialsMissingUsernameMessage +} + +// NewErrCredentialsMissingServerURL creates a new error for +// errCredentialsMissingServerURL. +func NewErrCredentialsMissingServerURL() error { + return errCredentialsMissingServerURL{} +} + +// NewErrCredentialsMissingUsername creates a new error for +// errCredentialsMissingUsername. +func NewErrCredentialsMissingUsername() error { + return errCredentialsMissingUsername{} +} + +// IsCredentialsMissingServerURL returns true if the error +// was an errCredentialsMissingServerURL. +func IsCredentialsMissingServerURL(err error) bool { + _, ok := err.(errCredentialsMissingServerURL) + return ok +} + +// IsCredentialsMissingServerURLMessage checks for an +// errCredentialsMissingServerURL in the error message. +func IsCredentialsMissingServerURLMessage(err string) bool { + return err == errCredentialsMissingServerURLMessage +} + +// IsCredentialsMissingUsername returns true if the error +// was an errCredentialsMissingUsername. +func IsCredentialsMissingUsername(err error) bool { + _, ok := err.(errCredentialsMissingUsername) + return ok +} + +// IsCredentialsMissingUsernameMessage checks for an +// errCredentialsMissingUsername in the error message. +func IsCredentialsMissingUsernameMessage(err string) bool { + return err == errCredentialsMissingUsernameMessage +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go new file mode 100644 index 00000000..135acd25 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go @@ -0,0 +1,14 @@ +package credentials + +// Helper is the interface a credentials store helper must implement. +type Helper interface { + // Add appends credentials to the store. + Add(*Credentials) error + // Delete removes credentials from the store. + Delete(serverURL string) error + // Get retrieves credentials from the store. + // It returns username and secret as strings. + Get(serverURL string) (string, string, error) + // List returns the stored serverURLs and their associated usernames. + List() (map[string]string, error) +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go new file mode 100644 index 00000000..84377c26 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go @@ -0,0 +1,16 @@ +package credentials + +var ( + // Name is filled at linking time + Name = "" + + // Package is filled at linking time + Package = "github.com/docker/docker-credential-helpers" + + // Version holds the complete version number. Filled in at linking time. + Version = "v0.0.0+unknown" + + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. + Revision = "" +) diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go new file mode 100644 index 00000000..5e6310fd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -0,0 +1,93 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" + "os" + "path/filepath" + "strings" +) + +// GetRuntimeDir returns XDG_RUNTIME_DIR. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. 
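+// The value is commonly a path like /run/user/$UID.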
+// GetRuntimeDir returns a non-nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+	if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
+		return xdgRuntimeDir, nil
+	}
+	return "", errors.New("could not get XDG_RUNTIME_DIR")
+}
+
+// StickRuntimeDirContents sets the sticky bit on files that are under
+// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
+//
+// StickRuntimeDirContents returns the slice of files it made sticky.
+// StickRuntimeDirContents returns a nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func StickRuntimeDirContents(files []string) ([]string, error) {
+	runtimeDir, err := GetRuntimeDir()
+	if err != nil {
+		// ignore error if runtimeDir is empty
+		return nil, nil
+	}
+	runtimeDir, err = filepath.Abs(runtimeDir)
+	if err != nil {
+		return nil, err
+	}
+	var sticked []string
+	for _, f := range files {
+		f, err = filepath.Abs(f)
+		if err != nil {
+			return sticked, err
+		}
+		if strings.HasPrefix(f, runtimeDir+"/") {
+			if err = stick(f); err != nil {
+				return sticked, err
+			}
+			sticked = append(sticked, f)
+		}
+	}
+	return sticked, nil
+}
+
+func stick(f string) error {
+	st, err := os.Stat(f)
+	if err != nil {
+		return err
+	}
+	m := st.Mode()
+	m |= os.ModeSticky
+	return os.Chmod(f, m)
+}
+
+// GetDataHome returns XDG_DATA_HOME.
+// GetDataHome returns $HOME/.local/share and a nil error if XDG_DATA_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetDataHome() (string, error) {
+	if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
+		return xdgDataHome, nil
+	}
+	home := os.Getenv("HOME")
+	if home == "" {
+		return "", errors.New("could not get either XDG_DATA_HOME or HOME")
+	}
+	return filepath.Join(home, ".local", "share"), nil
+}
+
+// GetConfigHome returns XDG_CONFIG_HOME.
+// GetConfigHome returns $HOME/.config and a nil error if XDG_CONFIG_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetConfigHome() (string, error) {
+	if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
+		return xdgConfigHome, nil
+	}
+	home := os.Getenv("HOME")
+	if home == "" {
+		return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
+	}
+	return filepath.Join(home, ".config"), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
new file mode 100644
index 00000000..fc48e674
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go
@@ -0,0 +1,28 @@
+//go:build !linux
+// +build !linux
+
+package homedir // import "github.com/docker/docker/pkg/homedir"
+
+import (
+	"errors"
+)
+
+// GetRuntimeDir is unsupported on non-linux system.
+func GetRuntimeDir() (string, error) {
+	return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
+}
+
+// StickRuntimeDirContents is unsupported on non-linux system.
+func StickRuntimeDirContents(files []string) ([]string, error) {
+	return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
+}
+
+// GetDataHome is unsupported on non-linux system.
+func GetDataHome() (string, error) { + return "", errors.New("homedir.GetDataHome() is not supported on this system") +} + +// GetConfigHome is unsupported on non-linux system. +func GetConfigHome() (string, error) { + return "", errors.New("homedir.GetConfigHome() is not supported on this system") +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go new file mode 100644 index 00000000..d1732dee --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -0,0 +1,39 @@ +//go:build !windows +// +build !windows + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" + "os/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "~" +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 00000000..2f81813b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/google/go-containerregistry/LICENSE b/vendor/github.com/google/go-containerregistry/LICENSE new file mode 100644 index 00000000..7a4a3ea2 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git a/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go b/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go
new file mode 100644
index 00000000..14a05eaa
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/and/and_closer.go
@@ -0,0 +1,48 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package and provides helpers for adding Close to io.{Reader|Writer}.
+package and
+
+import (
+	"io"
+)
+
+// ReadCloser implements io.ReadCloser by reading from a particular io.Reader
+// and then calling the provided "Close()" method.
+type ReadCloser struct {
+	io.Reader
+	CloseFunc func() error
+}
+
+var _ io.ReadCloser = (*ReadCloser)(nil)
+
+// Close implements io.ReadCloser
+func (rac *ReadCloser) Close() error {
+	return rac.CloseFunc()
+}
+
+// WriteCloser implements io.WriteCloser by writing to a particular io.Writer
+// and then calling the provided "Close()" method.
+type WriteCloser struct {
+	io.Writer
+	CloseFunc func() error
+}
+
+var _ io.WriteCloser = (*WriteCloser)(nil)
+
+// Close implements io.WriteCloser
+func (wac *WriteCloser) Close() error {
+	return wac.CloseFunc()
+}
diff --git a/vendor/github.com/google/go-containerregistry/internal/compression/compression.go b/vendor/github.com/google/go-containerregistry/internal/compression/compression.go
new file mode 100644
index 00000000..01248715
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/compression/compression.go
@@ -0,0 +1,97 @@
+// Copyright 2022 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package compression abstracts over gzip and zstd.
+package compression
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+
+	"github.com/google/go-containerregistry/internal/gzip"
+	"github.com/google/go-containerregistry/internal/zstd"
+	"github.com/google/go-containerregistry/pkg/compression"
+)
+
+// Opener represents e.g. opening a file.
+type Opener = func() (io.ReadCloser, error)
+
+// GetCompression detects whether an Opener is compressed and which algorithm is used.
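+//
+// A hypothetical file-based Opener as a sketch:
+//
+//	opener := func() (io.ReadCloser, error) { return os.Open("layer.tar.gz") }
+//	c, err := GetCompression(opener) // compression.GZip for a gzipped layer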
+func GetCompression(opener Opener) (compression.Compression, error) { + rc, err := opener() + if err != nil { + return compression.None, err + } + defer rc.Close() + + cp, _, err := PeekCompression(rc) + if err != nil { + return compression.None, err + } + + return cp, nil +} + +// PeekCompression detects whether the input stream is compressed and which algorithm is used. +// +// If r implements Peek, we will use that directly, otherwise a small number +// of bytes are buffered to Peek at the gzip/zstd header, and the returned +// PeekReader can be used as a replacement for the consumed input io.Reader. +func PeekCompression(r io.Reader) (compression.Compression, PeekReader, error) { + pr := intoPeekReader(r) + + if isGZip, _, err := checkHeader(pr, gzip.MagicHeader); err != nil { + return compression.None, pr, err + } else if isGZip { + return compression.GZip, pr, nil + } + + if isZStd, _, err := checkHeader(pr, zstd.MagicHeader); err != nil { + return compression.None, pr, err + } else if isZStd { + return compression.ZStd, pr, nil + } + + return compression.None, pr, nil +} + +// PeekReader is an io.Reader that also implements Peek a la bufio.Reader. +type PeekReader interface { + io.Reader + Peek(n int) ([]byte, error) +} + +// IntoPeekReader creates a PeekReader from an io.Reader. +// If the reader already has a Peek method, it will just return the passed reader. +func intoPeekReader(r io.Reader) PeekReader { + if p, ok := r.(PeekReader); ok { + return p + } + + return bufio.NewReader(r) +} + +// CheckHeader checks whether the first bytes from a PeekReader match an expected header +func checkHeader(pr PeekReader, expectedHeader []byte) (bool, PeekReader, error) { + header, err := pr.Peek(len(expectedHeader)) + if err != nil { + // https://github.com/google/go-containerregistry/issues/367 + if err == io.EOF { + return false, pr, nil + } + return false, pr, err + } + return bytes.Equal(header, expectedHeader), pr, nil +} diff --git a/vendor/github.com/google/go-containerregistry/internal/estargz/estargz.go b/vendor/github.com/google/go-containerregistry/internal/estargz/estargz.go new file mode 100644 index 00000000..69021bce --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/internal/estargz/estargz.go @@ -0,0 +1,54 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package estargz adapts the containerd estargz package to our abstractions. +package estargz + +import ( + "bytes" + "io" + + "github.com/containerd/stargz-snapshotter/estargz" + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +// Assert that what we're returning is an io.ReadCloser +var _ io.ReadCloser = (*estargz.Blob)(nil) + +// ReadCloser reads uncompressed tarball input from the io.ReadCloser and +// returns: +// - An io.ReadCloser from which compressed data may be read, and +// - A v1.Hash with the hash of the estargz table of contents, or +// - An error if the estargz processing encountered a problem. 
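+//
+// The returned hash is the digest of the estargz table of contents (TOC),
+// which is typically attached to the layer descriptor via the
+// "containerd.io/snapshot/stargz/toc.digest" annotation.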
+//
+// Refer to estargz for the options:
+// https://pkg.go.dev/github.com/containerd/stargz-snapshotter/estargz@v0.4.1#Option
+func ReadCloser(r io.ReadCloser, opts ...estargz.Option) (*estargz.Blob, v1.Hash, error) {
+	defer r.Close()
+
+	// TODO(#876): Avoid buffering into memory.
+	bs, err := io.ReadAll(r)
+	if err != nil {
+		return nil, v1.Hash{}, err
+	}
+	br := bytes.NewReader(bs)
+
+	rc, err := estargz.Build(io.NewSectionReader(br, 0, int64(len(bs))), opts...)
+	if err != nil {
+		return nil, v1.Hash{}, err
+	}
+
+	h, err := v1.NewHash(rc.TOCDigest().String())
+	return rc, h, err
+}
diff --git a/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go b/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go
new file mode 100644
index 00000000..018c0f8c
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/gzip/zip.go
@@ -0,0 +1,118 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gzip provides helper functions for interacting with gzipped streams.
+package gzip
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"io"
+
+	"github.com/google/go-containerregistry/internal/and"
+)
+
+// MagicHeader is the start of gzip files.
+var MagicHeader = []byte{'\x1f', '\x8b'}
+
+// ReadCloser reads uncompressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which compressed data may be read.
+// This uses gzip.BestSpeed for the compression level.
+func ReadCloser(r io.ReadCloser) io.ReadCloser {
+	return ReadCloserLevel(r, gzip.BestSpeed)
+}
+
+// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which compressed data may be read.
+// Refer to compress/gzip for the level:
+// https://golang.org/pkg/compress/gzip/#pkg-constants
+func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser {
+	pr, pw := io.Pipe()
+
+	// For highly compressible layers, gzip.Writer will output a very small
+	// number of bytes per Write(). This is normally fine, but when pushing
+	// to a registry, we want to ensure that we're taking full advantage of
+	// the available bandwidth instead of sending tons of tiny writes over
+	// the wire.
+	// 128K (2<<16 bytes) ought to be small enough for anybody.
+	bw := bufio.NewWriterSize(pw, 2<<16)
+
+	// Returns err so we can pw.CloseWithError(err)
+	go func() error {
+		// TODO(go1.14): Just defer {pw,gw,r}.Close like you'd expect.
+		// Context: https://golang.org/issue/24283
+		gw, err := gzip.NewWriterLevel(bw, level)
+		if err != nil {
+			return pw.CloseWithError(err)
+		}
+
+		if _, err := io.Copy(gw, r); err != nil {
+			defer r.Close()
+			defer gw.Close()
+			return pw.CloseWithError(err)
+		}
+
+		// Close gzip writer to Flush it and write gzip trailers.
+		if err := gw.Close(); err != nil {
+			return pw.CloseWithError(err)
+		}
+
+		// Flush bufio writer to ensure we write out everything.
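+		// (gw.Close() above already flushed the gzip data and trailer into
+		// bw; this pushes whatever is still buffered through the pipe.)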
+ if err := bw.Flush(); err != nil { + return pw.CloseWithError(err) + } + + // We don't really care if these fail. + defer pw.Close() + defer r.Close() + + return nil + }() + + return pr +} + +// UnzipReadCloser reads compressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which uncompressed data may be read. +func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { + gr, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &and.ReadCloser{ + Reader: gr, + CloseFunc: func() error { + // If the unzip fails, then this seems to return the same + // error as the read. We don't want this to interfere with + // us closing the main ReadCloser, since this could leave + // an open file descriptor (fails on Windows). + gr.Close() + return r.Close() + }, + }, nil +} + +// Is detects whether the input stream is compressed. +func Is(r io.Reader) (bool, error) { + magicHeader := make([]byte, 2) + n, err := r.Read(magicHeader) + if n == 0 && err == io.EOF { + return false, nil + } + if err != nil { + return false, err + } + return bytes.Equal(magicHeader, MagicHeader), nil +} diff --git a/vendor/github.com/google/go-containerregistry/internal/legacy/copy.go b/vendor/github.com/google/go-containerregistry/internal/legacy/copy.go new file mode 100644 index 00000000..10467ba1 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/internal/legacy/copy.go @@ -0,0 +1,57 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package legacy provides methods for interacting with legacy image formats. +package legacy + +import ( + "bytes" + "encoding/json" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +// CopySchema1 allows `[g]crane cp` to work with old images without adding +// full support for schema 1 images to this package. +func CopySchema1(desc *remote.Descriptor, srcRef, dstRef name.Reference, opts ...remote.Option) error { + m := schema1{} + if err := json.NewDecoder(bytes.NewReader(desc.Manifest)).Decode(&m); err != nil { + return err + } + + for _, layer := range m.FSLayers { + src := srcRef.Context().Digest(layer.BlobSum) + dst := dstRef.Context().Digest(layer.BlobSum) + + blob, err := remote.Layer(src, opts...) + if err != nil { + return err + } + + if err := remote.WriteLayer(dst.Context(), blob, opts...); err != nil { + return err + } + } + + return remote.Put(dstRef, desc, opts...) +} + +type fslayer struct { + BlobSum string `json:"blobSum"` +} + +type schema1 struct { + FSLayers []fslayer `json:"fsLayers"` +} diff --git a/vendor/github.com/google/go-containerregistry/internal/redact/redact.go b/vendor/github.com/google/go-containerregistry/internal/redact/redact.go new file mode 100644 index 00000000..b2e3f186 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/internal/redact/redact.go @@ -0,0 +1,89 @@ +// Copyright 2020 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package redact contains a simple context signal for redacting requests. +package redact + +import ( + "context" + "errors" + "net/url" +) + +type contextKey string + +var redactKey = contextKey("redact") + +// NewContext creates a new ctx with the reason for redaction. +func NewContext(ctx context.Context, reason string) context.Context { + return context.WithValue(ctx, redactKey, reason) +} + +// FromContext returns the redaction reason, if any. +func FromContext(ctx context.Context) (bool, string) { + reason, ok := ctx.Value(redactKey).(string) + return ok, reason +} + +// Error redacts potentially sensitive query parameter values in the URL from the error's message. +// +// If the error is a *url.Error, this returns a *url.Error with the URL redacted. +// Any other error type, or nil, is returned unchanged. +func Error(err error) error { + // If the error is a url.Error, we can redact the URL. + // Otherwise (including if err is nil), we can't redact. + var uerr *url.Error + if ok := errors.As(err, &uerr); !ok { + return err + } + u, perr := url.Parse(uerr.URL) + if perr != nil { + return err // If the URL can't be parsed, just return the original error. + } + uerr.URL = URL(u).String() // Update the URL to the redacted URL. + return uerr +} + +// The set of query string keys that we expect to send as part of the registry +// protocol. Anything else is potentially dangerous to leak, as it's probably +// from a redirect. These redirects often included tokens or signed URLs. +var paramAllowlist = map[string]struct{}{ + // Token exchange + "scope": {}, + "service": {}, + // Cross-repo mounting + "mount": {}, + "from": {}, + // Layer PUT + "digest": {}, + // Listing tags and catalog + "n": {}, + "last": {}, +} + +// URL redacts potentially sensitive query parameter values from the URL's query string. +func URL(u *url.URL) *url.URL { + qs := u.Query() + for k, v := range qs { + for i := range v { + if _, ok := paramAllowlist[k]; !ok { + // key is not in the Allowlist + v[i] = "REDACTED" + } + } + } + r := *u + r.RawQuery = qs.Encode() + return &r +} diff --git a/vendor/github.com/google/go-containerregistry/internal/retry/retry.go b/vendor/github.com/google/go-containerregistry/internal/retry/retry.go new file mode 100644 index 00000000..c9e35645 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/internal/retry/retry.go @@ -0,0 +1,94 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package retry provides methods for retrying operations. It is a thin wrapper
+// around k8s.io/apimachinery/pkg/util/wait to make certain operations easier.
+package retry
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/google/go-containerregistry/internal/retry/wait"
+)
+
+// Backoff is an alias of our own wait.Backoff to avoid name conflicts with
+// the kubernetes wait package. Typing retry.Backoff is easier than fixing
+// the wrong import every time you use wait.Backoff.
+type Backoff = wait.Backoff
+
+// This is implemented by several errors in the net package as well as our
+// transport.Error.
+type temporary interface {
+	Temporary() bool
+}
+
+// IsTemporary returns true if err implements Temporary() and it returns true.
+func IsTemporary(err error) bool {
+	if errors.Is(err, context.DeadlineExceeded) {
+		return false
+	}
+	if te, ok := err.(temporary); ok && te.Temporary() {
+		return true
+	}
+	return false
+}
+
+// IsNotNil returns true if err is not nil.
+func IsNotNil(err error) bool {
+	return err != nil
+}
+
+// Predicate determines whether an error should be retried.
+type Predicate func(error) (retry bool)
+
+// Retry retries a given function, f, until a predicate is satisfied, using
+// exponential backoff. If the predicate is never satisfied, it will return the
+// last error returned by f.
+func Retry(f func() error, p Predicate, backoff wait.Backoff) (err error) {
+	if f == nil {
+		return fmt.Errorf("nil f passed to retry")
+	}
+	if p == nil {
+		return fmt.Errorf("nil p passed to retry")
+	}
+
+	condition := func() (bool, error) {
+		err = f()
+		if p(err) {
+			return false, nil
+		}
+		return true, err
+	}
+
+	wait.ExponentialBackoff(backoff, condition)
+	return
+}
+
+type contextKey string
+
+var key = contextKey("never")
+
+// Never returns a context that signals something should not be retried.
+// This is a hack and can be used to communicate across package boundaries
+// to avoid retry amplification.
+func Never(ctx context.Context) context.Context {
+	return context.WithValue(ctx, key, true)
+}
+
+// Ever returns true if the context was not wrapped by Never.
+func Ever(ctx context.Context) bool {
+	return ctx.Value(key) == nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go b/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go
new file mode 100644
index 00000000..ab06e5f1
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package wait is a subset of k8s.io/apimachinery to avoid conflicts
+// in dependencies (specifically, logging).
+package wait + +import ( + "errors" + "math/rand" + "time" +) + +// Jitter returns a time.Duration between duration and duration + maxFactor * +// duration. +// +// This allows clients to avoid converging on periodic behavior. If maxFactor +// is 0.0, a suggested default value will be chosen. +func Jitter(duration time.Duration, maxFactor float64) time.Duration { + if maxFactor <= 0.0 { + maxFactor = 1.0 + } + wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration)) + return wait +} + +// ErrWaitTimeout is returned when the condition exited without success. +var ErrWaitTimeout = errors.New("timed out waiting for the condition") + +// ConditionFunc returns true if the condition is satisfied, or an error +// if the loop should be aborted. +type ConditionFunc func() (done bool, err error) + +// Backoff holds parameters applied to a Backoff function. +type Backoff struct { + // The initial duration. + Duration time.Duration + // Duration is multiplied by factor each iteration, if factor is not zero + // and the limits imposed by Steps and Cap have not been reached. + // Should not be negative. + // The jitter does not contribute to the updates to the duration parameter. + Factor float64 + // The sleep at each iteration is the duration plus an additional + // amount chosen uniformly at random from the interval between + // zero and `jitter*duration`. + Jitter float64 + // The remaining number of iterations in which the duration + // parameter may change (but progress can be stopped earlier by + // hitting the cap). If not positive, the duration is not + // changed. Used for exponential backoff in combination with + // Factor and Cap. + Steps int + // A limit on revised values of the duration parameter. If a + // multiplication by the factor parameter would make the duration + // exceed the cap then the duration is set to the cap and the + // steps parameter is set to zero. + Cap time.Duration +} + +// Step (1) returns an amount of time to sleep determined by the +// original Duration and Jitter and (2) mutates the provided Backoff +// to update its Steps and Duration. +func (b *Backoff) Step() time.Duration { + if b.Steps < 1 { + if b.Jitter > 0 { + return Jitter(b.Duration, b.Jitter) + } + return b.Duration + } + b.Steps-- + + duration := b.Duration + + // calculate the next step + if b.Factor != 0 { + b.Duration = time.Duration(float64(b.Duration) * b.Factor) + if b.Cap > 0 && b.Duration > b.Cap { + b.Duration = b.Cap + b.Steps = 0 + } + } + + if b.Jitter > 0 { + duration = Jitter(duration, b.Jitter) + } + return duration +} + +// ExponentialBackoff repeats a condition check with exponential backoff. +// +// It repeatedly checks the condition and then sleeps, using `backoff.Step()` +// to determine the length of the sleep and adjust Duration and Steps. +// Stops and returns as soon as: +// 1. the condition check returns true or an error, +// 2. `backoff.Steps` checks of the condition have been done, or +// 3. a sleep truncated by the cap on duration has been completed. +// In case (1) the returned error is what the condition function returned. +// In all other cases, ErrWaitTimeout is returned. 
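+//
+// A minimal sketch of typical usage (the checkReady function is hypothetical):
+//
+//	backoff := Backoff{Duration: 100 * time.Millisecond, Factor: 2.0, Jitter: 0.1, Steps: 5}
+//	err := ExponentialBackoff(backoff, func() (bool, error) {
+//		return checkReady(), nil // retry until checkReady() reports true
+//	})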
+func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
+	for backoff.Steps > 0 {
+		if ok, err := condition(); err != nil || ok {
+			return err
+		}
+		if backoff.Steps == 1 {
+			break
+		}
+		time.Sleep(backoff.Step())
+	}
+	return ErrWaitTimeout
+}
diff --git a/vendor/github.com/google/go-containerregistry/internal/verify/verify.go b/vendor/github.com/google/go-containerregistry/internal/verify/verify.go
new file mode 100644
index 00000000..463f7e4b
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/internal/verify/verify.go
@@ -0,0 +1,122 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package verify provides a ReadCloser that verifies content matches the
+// expected hash values.
+package verify
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+
+	"github.com/google/go-containerregistry/internal/and"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+)
+
+// SizeUnknown is a sentinel value to indicate that the expected size is not known.
+const SizeUnknown = -1
+
+type verifyReader struct {
+	inner             io.Reader
+	hasher            hash.Hash
+	expected          v1.Hash
+	gotSize, wantSize int64
+}
+
+// Error provides information about the failed hash verification.
+type Error struct {
+	got     string
+	want    v1.Hash
+	gotSize int64
+}
+
+func (v Error) Error() string {
+	return fmt.Sprintf("error verifying %s checksum after reading %d bytes; got %q, want %q",
+		v.want.Algorithm, v.gotSize, v.got, v.want)
+}
+
+// Read implements io.Reader
+func (vc *verifyReader) Read(b []byte) (int, error) {
+	n, err := vc.inner.Read(b)
+	vc.gotSize += int64(n)
+	if err == io.EOF {
+		if vc.wantSize != SizeUnknown && vc.gotSize != vc.wantSize {
+			return n, fmt.Errorf("error verifying size; got %d, want %d", vc.gotSize, vc.wantSize)
+		}
+		got := hex.EncodeToString(vc.hasher.Sum(nil))
+		if want := vc.expected.Hex; got != want {
+			return n, Error{
+				got:     vc.expected.Algorithm + ":" + got,
+				want:    vc.expected,
+				gotSize: vc.gotSize,
+			}
+		}
+	}
+	return n, err
+}
+
+// ReadCloser wraps the given io.ReadCloser to verify that its contents match
+// the provided v1.Hash before io.EOF is returned.
+//
+// The reader will only be read up to size bytes, to prevent resource
+// exhaustion. If EOF is returned before size bytes are read, an error is
+// returned.
+//
+// A size of SizeUnknown (-1) disables size verification when the size is
+// unknown ahead of time.
+func ReadCloser(r io.ReadCloser, size int64, h v1.Hash) (io.ReadCloser, error) {
+	w, err := v1.Hasher(h.Algorithm)
+	if err != nil {
+		return nil, err
+	}
+	r2 := io.TeeReader(r, w) // pass all writes to the hasher.
+	if size != SizeUnknown {
+		r2 = io.LimitReader(r2, size) // if we know the size, limit to that size.
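+		// Anything past size bytes is never read, so a misbehaving peer
+		// cannot force unbounded hashing; a short stream is caught by the
+		// size check in Read at EOF.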
+ } + return &and.ReadCloser{ + Reader: &verifyReader{ + inner: r2, + hasher: w, + expected: h, + wantSize: size, + }, + CloseFunc: r.Close, + }, nil +} + +// Descriptor verifies that the embedded Data field matches the Size and Digest +// fields of the given v1.Descriptor, returning an error if the Data field is +// missing or if it contains incorrect data. +func Descriptor(d v1.Descriptor) error { + if d.Data == nil { + return errors.New("error verifying descriptor; Data == nil") + } + + h, sz, err := v1.SHA256(bytes.NewReader(d.Data)) + if err != nil { + return err + } + if h != d.Digest { + return fmt.Errorf("error verifying Digest; got %q, want %q", h, d.Digest) + } + if sz != d.Size { + return fmt.Errorf("error verifying Size; got %d, want %d", sz, d.Size) + } + + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/internal/windows/windows.go b/vendor/github.com/google/go-containerregistry/internal/windows/windows.go new file mode 100644 index 00000000..62d04cfb --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/internal/windows/windows.go @@ -0,0 +1,114 @@ +// Copyright 2021 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package windows + +import ( + "archive/tar" + "bytes" + "errors" + "fmt" + "io" + "path" + "strings" + + "github.com/google/go-containerregistry/internal/gzip" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/tarball" +) + +// userOwnerAndGroupSID is a magic value needed to make the binary executable +// in a Windows container. +// +// owner: BUILTIN/Users group: BUILTIN/Users ($sddlValue="O:BUG:BU") +const userOwnerAndGroupSID = "AQAAgBQAAAAkAAAAAAAAAAAAAAABAgAAAAAABSAAAAAhAgAAAQIAAAAAAAUgAAAAIQIAAA==" + +// Windows returns a Layer that is converted to be pullable on Windows. +func Windows(layer v1.Layer) (v1.Layer, error) { + // TODO: do this lazily. + + layerReader, err := layer.Uncompressed() + if err != nil { + return nil, fmt.Errorf("getting layer: %w", err) + } + defer layerReader.Close() + tarReader := tar.NewReader(layerReader) + w := new(bytes.Buffer) + tarWriter := tar.NewWriter(w) + defer tarWriter.Close() + + for _, dir := range []string{"Files", "Hives"} { + if err := tarWriter.WriteHeader(&tar.Header{ + Name: dir, + Typeflag: tar.TypeDir, + // Use a fixed Mode, so that this isn't sensitive to the directory and umask + // under which it was created. Additionally, windows can only set 0222, + // 0444, or 0666, none of which are executable. 
+ Mode: 0555, + Format: tar.FormatPAX, + }); err != nil { + return nil, fmt.Errorf("writing %s directory: %w", dir, err) + } + } + + for { + header, err := tarReader.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, fmt.Errorf("reading layer: %w", err) + } + + if strings.HasPrefix(header.Name, "Files/") { + return nil, fmt.Errorf("file path %q already suitable for Windows", header.Name) + } + + header.Name = path.Join("Files", header.Name) + header.Format = tar.FormatPAX + + // TODO: this seems to make the file executable on Windows; + // only do this if the file should be executable. + if header.PAXRecords == nil { + header.PAXRecords = map[string]string{} + } + header.PAXRecords["MSWINDOWS.rawsd"] = userOwnerAndGroupSID + + if err := tarWriter.WriteHeader(header); err != nil { + return nil, fmt.Errorf("writing tar header: %w", err) + } + + if header.Typeflag == tar.TypeReg { + if _, err = io.Copy(tarWriter, tarReader); err != nil { + return nil, fmt.Errorf("writing layer file: %w", err) + } + } + } + + if err := tarWriter.Close(); err != nil { + return nil, err + } + + b := w.Bytes() + // gzip the contents, then create the layer + opener := func() (io.ReadCloser, error) { + return gzip.ReadCloser(io.NopCloser(bytes.NewReader(b))), nil + } + layer, err = tarball.LayerFromOpener(opener) + if err != nil { + return nil, fmt.Errorf("creating layer: %w", err) + } + + return layer, nil +} diff --git a/vendor/github.com/google/go-containerregistry/internal/zstd/zstd.go b/vendor/github.com/google/go-containerregistry/internal/zstd/zstd.go new file mode 100644 index 00000000..cccf54a3 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/internal/zstd/zstd.go @@ -0,0 +1,116 @@ +// Copyright 2022 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package zstd provides helper functions for interacting with zstd streams. +package zstd + +import ( + "bufio" + "bytes" + "io" + + "github.com/google/go-containerregistry/internal/and" + "github.com/klauspost/compress/zstd" +) + +// MagicHeader is the start of zstd files. +var MagicHeader = []byte{'\x28', '\xb5', '\x2f', '\xfd'} + +// ReadCloser reads uncompressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which compressed data may be read. +// This uses zstd level 1 for the compression. +func ReadCloser(r io.ReadCloser) io.ReadCloser { + return ReadCloserLevel(r, 1) +} + +// ReadCloserLevel reads uncompressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which compressed data may be read. +func ReadCloserLevel(r io.ReadCloser, level int) io.ReadCloser { + pr, pw := io.Pipe() + + // For highly compressible layers, zstd.Writer will output a very small + // number of bytes per Write(). This is normally fine, but when pushing + // to a registry, we want to ensure that we're taking full advantage of + // the available bandwidth instead of sending tons of tiny writes over + // the wire. 
+	// 128K ought to be small enough for anybody.
+	bw := bufio.NewWriterSize(pw, 2<<16)
+
+	// Returns err so we can pw.CloseWithError(err)
+	go func() error {
+		// TODO(go1.14): Just defer {pw,zw,r}.Close like you'd expect.
+		// Context: https://golang.org/issue/24283
+		zw, err := zstd.NewWriter(bw, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level)))
+		if err != nil {
+			return pw.CloseWithError(err)
+		}
+
+		if _, err := io.Copy(zw, r); err != nil {
+			defer r.Close()
+			defer zw.Close()
+			return pw.CloseWithError(err)
+		}
+
+		// Close zstd writer to Flush it and write zstd trailers.
+		if err := zw.Close(); err != nil {
+			return pw.CloseWithError(err)
+		}
+
+		// Flush bufio writer to ensure we write out everything.
+		if err := bw.Flush(); err != nil {
+			return pw.CloseWithError(err)
+		}
+
+		// We don't really care if these fail.
+		defer pw.Close()
+		defer r.Close()
+
+		return nil
+	}()
+
+	return pr
+}
+
+// UnzipReadCloser reads compressed input data from the io.ReadCloser and
+// returns an io.ReadCloser from which uncompressed data may be read.
+func UnzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) {
+	gr, err := zstd.NewReader(r)
+	if err != nil {
+		return nil, err
+	}
+	return &and.ReadCloser{
+		Reader: gr,
+		CloseFunc: func() error {
+			// If the unzip fails, then this seems to return the same
+			// error as the read. We don't want this to interfere with
+			// us closing the main ReadCloser, since this could leave
+			// an open file descriptor (fails on Windows).
+			gr.Close()
+			return r.Close()
+		},
+	}, nil
+}
+
+// Is detects whether the input stream is compressed.
+func Is(r io.Reader) (bool, error) {
+	magicHeader := make([]byte, 4)
+	n, err := r.Read(magicHeader)
+	if n == 0 && err == io.EOF {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return bytes.Equal(magicHeader, MagicHeader), nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/README.md b/vendor/github.com/google/go-containerregistry/pkg/authn/README.md
new file mode 100644
index 00000000..042bddec
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/README.md
@@ -0,0 +1,322 @@
+# `authn`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/authn?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/authn)
+
+This README outlines how we acquire and use credentials when interacting with a registry.
+
+As much as possible, we attempt to emulate `docker`'s authentication behavior and configuration so that this library "just works" if you've already configured credentials that work with `docker`; however, when things don't work, a basic understanding of what's going on can help with debugging.
+
+The official documentation for how authentication with `docker` works is (reasonably) scattered across several different sites and GitHub repositories, so we've tried to summarize the relevant bits here.
+
+## tl;dr for consumers of this package
+
+By default, [`pkg/v1/remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) uses [`Anonymous`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#Anonymous) credentials (i.e. _none_), which for most registries will only allow read access to public images.
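+
+For example, fetching the manifest of a public image needs no configuration at
+all. A minimal sketch (the image reference is illustrative; any public image
+would do):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+)
+
+func main() {
+	ref, err := name.ParseReference("gcr.io/google-containers/pause")
+	if err != nil {
+		panic(err)
+	}
+
+	// No auth option is passed, so the default Anonymous credentials are used.
+	desc, err := remote.Get(ref)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(desc.Digest)
+}
+```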
+ +To use the credentials found in your Docker config file, you can use the [`DefaultKeychain`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#DefaultKeychain), e.g.: + +```go +package main + +import ( + "fmt" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +func main() { + ref, err := name.ParseReference("registry.example.com/private/repo") + if err != nil { + panic(err) + } + + // Fetch the manifest using default credentials. + img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) + if err != nil { + panic(err) + } + + // Prints the digest of registry.example.com/private/repo + fmt.Println(img.Digest) +} +``` + +The `DefaultKeychain` will use credentials as described in your Docker config file -- usually `~/.docker/config.json`, or `%USERPROFILE%\.docker\config.json` on Windows -- or the location described by the `DOCKER_CONFIG` environment variable, if set. + +If those are not found, `DefaultKeychain` will look for credentials configured using [Podman's expectation](https://docs.podman.io/en/latest/markdown/podman-login.1.html) that these are found in `${XDG_RUNTIME_DIR}/containers/auth.json`. + +[See below](#docker-config-auth) for more information about what is configured in this file. + +## Emulating Cloud Provider Credential Helpers + +[`pkg/v1/google.Keychain`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/v1/google#Keychain) provides a `Keychain` implementation that emulates [`docker-credential-gcr`](https://github.com/GoogleCloudPlatform/docker-credential-gcr) to find credentials in the environment. +See [`google.NewEnvAuthenticator`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/v1/google#NewEnvAuthenticator) and [`google.NewGcloudAuthenticator`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/v1/google#NewGcloudAuthenticator) for more information. + +To emulate other credential helpers without requiring them to be available as executables, [`NewKeychainFromHelper`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/authn#NewKeychainFromHelper) provides an adapter that takes a Go implementation satisfying a subset of the [`credentials.Helper`](https://pkg.go.dev/github.com/docker/docker-credential-helpers/credentials#Helper) interface, and makes it available as a `Keychain`. + +This means that you can emulate, for example, [Amazon ECR's `docker-credential-ecr-login` credential helper](https://github.com/awslabs/amazon-ecr-credential-helper) using the same implementation: + +```go +import ( + ecr "github.com/awslabs/amazon-ecr-credential-helper/ecr-login" + "github.com/awslabs/amazon-ecr-credential-helper/ecr-login/api" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +func main() { + // ... + ecrHelper := ecr.ECRHelper{ClientFactory: api.DefaultClientFactory{}} + img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.NewKeychainFromHelper(ecrHelper))) + if err != nil { + panic(err) + } + // ... +} +``` + +Likewise, you can emulate [Azure's ACR `docker-credential-acr-env` credential helper](https://github.com/chrismellard/docker-credential-acr-env): + +```go +import ( + "github.com/chrismellard/docker-credential-acr-env/pkg/credhelper" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +func main() { + // ... 
+	acrHelper := credhelper.NewACRCredentialsHelper()
+	img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.NewKeychainFromHelper(acrHelper)))
+	if err != nil {
+		panic(err)
+	}
+	// ...
+}
+```
+
+
+## Using Multiple `Keychain`s
+
+[`NewMultiKeychain`](https://pkg.go.dev/github.com/google/go-containerregistry/pkg/authn#NewMultiKeychain) allows you to specify multiple `Keychain` implementations, which will be checked in order when credentials are needed.
+
+For example:
+
+```go
+kc := authn.NewMultiKeychain(
+    authn.DefaultKeychain,
+    google.Keychain,
+    authn.NewKeychainFromHelper(ecr.ECRHelper{ClientFactory: api.DefaultClientFactory{}}),
+    authn.NewKeychainFromHelper(acr.ACRCredHelper{}),
+)
+```
+
+This multi-keychain will:
+
+- first check for credentials found in the Docker config file, as described above, then
+- check for GCP credentials available in the environment, as described above, then
+- check for ECR credentials by emulating the ECR credential helper, then
+- check for ACR credentials by emulating the ACR credential helper.
+
+If any keychain implementation is able to provide credentials for the request, they will be used, and further keychain implementations will not be consulted.
+
+If no implementations are able to provide credentials, `Anonymous` credentials will be used.
+
+## Docker Config Auth
+
+What follows attempts to gather useful information about Docker's config.json and make it available in one place.
+
+If you have questions, please [file an issue](https://github.com/google/go-containerregistry/issues/new).
+
+### Plaintext
+
+The config file is where your credentials are stored when you invoke `docker login`, e.g. the contents may look something like this:
+
+```json
+{
+  "auths": {
+    "registry.example.com": {
+      "auth": "QXp1cmVEaWFtb25kOmh1bnRlcjI="
+    }
+  }
+}
+```
+
+The `auths` map has an entry per registry, and the `auth` field contains your username and password encoded as [HTTP 'Basic' Auth](https://tools.ietf.org/html/rfc7617).
+
+**NOTE**: This means that your credentials are stored _in plaintext_:
+
+```bash
+$ echo "QXp1cmVEaWFtb25kOmh1bnRlcjI=" | base64 -d
+AzureDiamond:hunter2
+```
+
+For what it's worth, this config file is equivalent to:
+
+```json
+{
+  "auths": {
+    "registry.example.com": {
+      "username": "AzureDiamond",
+      "password": "hunter2"
+    }
+  }
+}
+```
+
+... which is useful to know if e.g. your CI system provides you a registry username and password via environment variables and you want to populate this file manually without invoking `docker login`.
+
+### Helpers
+
+If you log in like this, `docker` will warn you that you should use a [credential helper](https://docs.docker.com/engine/reference/commandline/login/#credentials-store), and you should!
+
+To configure a global credential helper:
+```json
+{
+  "credsStore": "osxkeychain"
+}
+```
+
+To configure a per-registry credential helper:
+```json
+{
+  "credHelpers": {
+    "gcr.io": "gcr"
+  }
+}
+```
+
+We use [`github.com/docker/cli/cli/config.Load`](https://godoc.org/github.com/docker/cli/cli/config#Load) to parse the config file and invoke any necessary credential helpers. This handles the logic of taking a [`ConfigFile`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/configfile/file.go#L25-L54) + registry domain and producing an [`AuthConfig`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L3-L22), which determines how we authenticate to the registry.
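+
+If you want to see what that resolution produces for a particular registry, a
+minimal sketch using those same packages (the `gcr.io` key is illustrative and
+assumes a matching entry in your config file):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/docker/cli/cli/config"
+)
+
+func main() {
+	// Load the config file from $DOCKER_CONFIG, or ~/.docker by default.
+	cf, err := config.Load(os.Getenv("DOCKER_CONFIG"))
+	if err != nil {
+		panic(err)
+	}
+
+	// GetAuthConfig consults the auths map, credsStore, and credHelpers,
+	// invoking credential helpers as needed.
+	cfg, err := cf.GetAuthConfig("gcr.io")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(cfg.Username)
+}
+```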
+
+## Credential Helpers
+
+The [credential helper protocol](https://github.com/docker/docker-credential-helpers) allows you to configure a binary that supplies credentials for the registry, rather than hard-coding them in the config file.
+
+The protocol has several verbs, but the one we most care about is `get`.
+
+For example, using the following config file:
+```json
+{
+  "credHelpers": {
+    "gcr.io": "gcr",
+    "eu.gcr.io": "gcr"
+  }
+}
+```
+
+To acquire credentials for `gcr.io`, we look in the `credHelpers` map to find
+that the credential helper for `gcr.io` is `gcr`. By appending that value to
+`docker-credential-`, we can get the name of the binary we need to use.
+
+For this example, that's `docker-credential-gcr`, which must be on our `$PATH`.
+We'll then invoke that binary to get credentials:
+
+```bash
+$ echo "gcr.io" | docker-credential-gcr get
+{"Username":"_token","Secret":""}
+```
+
+You can configure the same credential helper for multiple registries, which is
+why we need to pass the domain in via STDIN, e.g. if we were trying to access
+`eu.gcr.io`, we'd do this instead:
+
+```bash
+$ echo "eu.gcr.io" | docker-credential-gcr get
+{"Username":"_token","Secret":""}
+```
+
+### Debugging credential helpers
+
+If a credential helper is configured but doesn't seem to be working, it can be
+challenging to debug. Implementing a fake credential helper lets you poke around
+to make it easier to see where the failure is happening.
+
+This "implements" a credential helper with hard-coded values:
+```
+#!/usr/bin/env bash
+echo '{"Username":"","Secret":"hunter2"}'
+```
+
+
+This implements a credential helper that prints the output of
+`docker-credential-gcr` to both stderr and whatever called it, which allows you
+to snoop on another credential helper:
+```
+#!/usr/bin/env bash
+docker-credential-gcr $@ | tee >(cat 1>&2)
+```
+
+Put those files somewhere on your path, naming them e.g.
+`docker-credential-hardcoded` and `docker-credential-tee`, then modify the
+config file to use them:
+
+```json
+{
+  "credHelpers": {
+    "gcr.io": "tee",
+    "eu.gcr.io": "hardcoded"
+  }
+}
+```
+
+The `docker-credential-tee` trick works with both `crane` and `docker`:
+
+```bash
+$ crane manifest gcr.io/google-containers/pause > /dev/null
+{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""}
+
+$ docker pull gcr.io/google-containers/pause
+Using default tag: latest
+{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":""}
+latest: Pulling from google-containers/pause
+a3ed95caeb02: Pull complete
+4964c72cd024: Pull complete
+Digest: sha256:a78c2d6208eff9b672de43f880093100050983047b7b0afe0217d3656e1b0d5f
+Status: Downloaded newer image for gcr.io/google-containers/pause:latest
+gcr.io/google-containers/pause:latest
+```
+
+## The Registry
+
+There are two methods for authenticating against a registry:
+[token](https://docs.docker.com/registry/spec/auth/token/) and
+[oauth2](https://docs.docker.com/registry/spec/auth/oauth/).
+
+Both methods are used to acquire an opaque `Bearer` token (or
+[RegistryToken](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L21))
+to use in the `Authorization` header. The registry will return a `401
+Unauthorized` during the [version
+check](https://github.com/opencontainers/distribution-spec/blob/2c3975d1f03b67c9a0203199038adea0413f0573/spec.md#api-version-check)
+(or during normal operations) with a
+[Www-Authenticate](https://tools.ietf.org/html/rfc7235#section-4.1) challenge
+indicating how to proceed.
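+
+You can poke at this yourself with a few lines of Go; a minimal sketch (the
+`gcr.io` endpoint is illustrative, and the exact challenge contents vary by
+registry):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// An unauthenticated request to the version check endpoint should come
+	// back 401 with a Www-Authenticate header describing how to get a token.
+	resp, err := http.Get("https://gcr.io/v2/")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	fmt.Println(resp.Status)
+	fmt.Println(resp.Header.Get("Www-Authenticate"))
+}
+```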
+
+### Token
+
+If we get back an `AuthConfig` containing a [`Username/Password`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L5-L6)
+or
+[`Auth`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L7),
+we'll use the token method for authentication:
+
+![basic](../../images/credhelper-basic.svg)
+
+### OAuth 2
+
+If we get back an `AuthConfig` containing an [`IdentityToken`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L18)
+we'll use the oauth2 method for authentication:
+
+![oauth](../../images/credhelper-oauth.svg)
+
+This happens when a credential helper returns a response with the
+[`Username`](https://github.com/docker/docker-credential-helpers/blob/f78081d1f7fef6ad74ad6b79368de6348386e591/credentials/credentials.go#L16)
+set to `<token>` (no, that's not a placeholder, the literal string `<token>`).
+It is unclear why: [moby/moby#36926](https://github.com/moby/moby/issues/36926).
+
+We only support the oauth2 `grant_type` for `refresh_token` ([#629](https://github.com/google/go-containerregistry/issues/629)),
+since it's impossible to determine from the registry response whether we should
+use oauth, and the token method for authentication is widely implemented by
+registries.
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go b/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go
new file mode 100644
index 00000000..83214957
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/anon.go
@@ -0,0 +1,26 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+// anonymous implements Authenticator for anonymous authentication.
+type anonymous struct{}
+
+// Authorization implements Authenticator.
+func (a *anonymous) Authorization() (*AuthConfig, error) {
+	return &AuthConfig{}, nil
+}
+
+// Anonymous is a singleton Authenticator for providing anonymous auth.
+var Anonymous Authenticator = &anonymous{}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go b/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go
new file mode 100644
index 00000000..0111f1ae
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/auth.go
@@ -0,0 +1,30 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+// auth is an Authenticator that simply returns the wrapped AuthConfig.
+type auth struct {
+	config AuthConfig
+}
+
+// FromConfig returns an Authenticator that just returns the given AuthConfig.
+func FromConfig(cfg AuthConfig) Authenticator {
+	return &auth{cfg}
+}
+
+// Authorization implements Authenticator.
+func (a *auth) Authorization() (*AuthConfig, error) {
+	return &a.config, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go b/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go
new file mode 100644
index 00000000..172d218e
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/authn.go
@@ -0,0 +1,115 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// Authenticator is used to authenticate Docker transports.
+type Authenticator interface {
+	// Authorization returns the value to use in an http transport's Authorization header.
+	Authorization() (*AuthConfig, error)
+}
+
+// AuthConfig contains authorization information for connecting to a Registry.
+// Inlined what we use from github.com/docker/cli/cli/config/types.
+type AuthConfig struct {
+	Username string `json:"username,omitempty"`
+	Password string `json:"password,omitempty"`
+	Auth     string `json:"auth,omitempty"`
+
+	// IdentityToken is used to authenticate the user and get
+	// an access token for the registry.
+	IdentityToken string `json:"identitytoken,omitempty"`
+
+	// RegistryToken is a bearer token to be sent to a registry
+	RegistryToken string `json:"registrytoken,omitempty"`
+}
+
+// This is effectively a copy of the type AuthConfig. This simplifies
+// JSON unmarshalling since AuthConfig methods are not inherited.
+type authConfig AuthConfig
+
+// UnmarshalJSON implements json.Unmarshaler
+func (a *AuthConfig) UnmarshalJSON(data []byte) error {
+	var shadow authConfig
+	err := json.Unmarshal(data, &shadow)
+	if err != nil {
+		return err
+	}
+
+	*a = (AuthConfig)(shadow)
+
+	if len(shadow.Auth) != 0 {
+		var derr error
+		a.Username, a.Password, derr = decodeDockerConfigFieldAuth(shadow.Auth)
+		if derr != nil {
+			err = fmt.Errorf("unable to decode auth field: %w", derr)
+		}
+	} else if len(a.Username) != 0 && len(a.Password) != 0 {
+		a.Auth = encodeDockerConfigFieldAuth(shadow.Username, shadow.Password)
+	}
+
+	return err
+}
+
+// MarshalJSON implements json.Marshaler
+func (a AuthConfig) MarshalJSON() ([]byte, error) {
+	shadow := (authConfig)(a)
+	shadow.Auth = encodeDockerConfigFieldAuth(shadow.Username, shadow.Password)
+	return json.Marshal(shadow)
+}
+
+// decodeDockerConfigFieldAuth deserializes the "auth" field from dockercfg into a
+// username and a password. The format of the auth field is base64(<username>:<password>).
+// +// From https://github.com/kubernetes/kubernetes/blob/75e49ec824b183288e1dbaccfd7dbe77d89db381/pkg/credentialprovider/config.go +// Copyright 2014 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 +func decodeDockerConfigFieldAuth(field string) (username, password string, err error) { + var decoded []byte + // StdEncoding can only decode padded string + // RawStdEncoding can only decode unpadded string + if strings.HasSuffix(strings.TrimSpace(field), "=") { + // decode padded data + decoded, err = base64.StdEncoding.DecodeString(field) + } else { + // decode unpadded data + decoded, err = base64.RawStdEncoding.DecodeString(field) + } + + if err != nil { + return + } + + parts := strings.SplitN(string(decoded), ":", 2) + if len(parts) != 2 { + err = fmt.Errorf("must be formatted as base64(username:password)") + return + } + + username = parts[0] + password = parts[1] + + return +} + +func encodeDockerConfigFieldAuth(username, password string) string { + return base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go b/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go new file mode 100644 index 00000000..500cb661 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/basic.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// Basic implements Authenticator for basic authentication. +type Basic struct { + Username string + Password string +} + +// Authorization implements Authenticator. +func (b *Basic) Authorization() (*AuthConfig, error) { + return &AuthConfig{ + Username: b.Username, + Password: b.Password, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go new file mode 100644 index 00000000..4cf86df9 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go @@ -0,0 +1,27 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// Bearer implements Authenticator for bearer authentication. +type Bearer struct { + Token string `json:"token"` +} + +// Authorization implements Authenticator. 
+func (b *Bearer) Authorization() (*AuthConfig, error) { + return &AuthConfig{ + RegistryToken: b.Token, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go b/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go new file mode 100644 index 00000000..c2a5fc02 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package authn defines different methods of authentication for +// talking to a container registry. +package authn diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go new file mode 100644 index 00000000..a4a88b3d --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go @@ -0,0 +1,180 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "os" + "path/filepath" + "sync" + + "github.com/docker/cli/cli/config" + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/types" + "github.com/google/go-containerregistry/pkg/name" + "github.com/mitchellh/go-homedir" +) + +// Resource represents a registry or repository that can be authenticated against. +type Resource interface { + // String returns the full string representation of the target, e.g. + // gcr.io/my-project or just gcr.io. + String() string + + // RegistryStr returns just the registry portion of the target, e.g. for + // gcr.io/my-project, this should just return gcr.io. This is needed to + // pull out an appropriate hostname. + RegistryStr() string +} + +// Keychain is an interface for resolving an image reference to a credential. +type Keychain interface { + // Resolve looks up the most appropriate credential for the specified target. + Resolve(Resource) (Authenticator, error) +} + +// defaultKeychain implements Keychain with the semantics of the standard Docker +// credential keychain. +type defaultKeychain struct { + mu sync.Mutex +} + +var ( + // DefaultKeychain implements Keychain by interpreting the docker config file. + DefaultKeychain Keychain = &defaultKeychain{} +) + +const ( + // DefaultAuthKey is the key used for dockerhub in config files, which + // is hardcoded for historical reasons. 
+	DefaultAuthKey = "https://" + name.DefaultRegistry + "/v1/"
+)
+
+// Resolve implements Keychain.
+func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) {
+	dk.mu.Lock()
+	defer dk.mu.Unlock()
+
+	// Podman users may have their container registry auth configured in a
+	// different location that Docker packages aren't aware of.
+	// If the Docker config file isn't found, we'll fall back to looking where
+	// Podman configures it, and parse that as a Docker auth config instead.
+
+	// First, check $HOME/.docker/config.json
+	foundDockerConfig := false
+	home, err := homedir.Dir()
+	if err == nil {
+		foundDockerConfig = fileExists(filepath.Join(home, ".docker/config.json"))
+	}
+	// If $HOME/.docker/config.json isn't found, check $DOCKER_CONFIG (if set)
+	if !foundDockerConfig && os.Getenv("DOCKER_CONFIG") != "" {
+		foundDockerConfig = fileExists(filepath.Join(os.Getenv("DOCKER_CONFIG"), "config.json"))
+	}
+	// If either of those locations is found, load it using Docker's
+	// config.Load, which may fail if the config can't be parsed.
+	//
+	// If neither was found, look for Podman's auth at
+	// $XDG_RUNTIME_DIR/containers/auth.json and attempt to load it as a
+	// Docker config.
+	//
+	// If neither is found, fall back to Anonymous.
+	var cf *configfile.ConfigFile
+	if foundDockerConfig {
+		cf, err = config.Load(os.Getenv("DOCKER_CONFIG"))
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		f, err := os.Open(filepath.Join(os.Getenv("XDG_RUNTIME_DIR"), "containers/auth.json"))
+		if err != nil {
+			return Anonymous, nil
+		}
+		defer f.Close()
+		cf, err = config.LoadFromReader(f)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// See:
+	// https://github.com/google/ko/issues/90
+	// https://github.com/moby/moby/blob/fc01c2b481097a6057bec3cd1ab2d7b4488c50c4/registry/config.go#L397-L404
+	var cfg, empty types.AuthConfig
+	for _, key := range []string{
+		target.String(),
+		target.RegistryStr(),
+	} {
+		if key == name.DefaultRegistry {
+			key = DefaultAuthKey
+		}
+
+		cfg, err = cf.GetAuthConfig(key)
+		if err != nil {
+			return nil, err
+		}
+		// cf.GetAuthConfig automatically sets the ServerAddress attribute. Since
+		// we don't make use of it, clear the value for a proper "is-empty" test.
+		// See: https://github.com/google/go-containerregistry/issues/1510
+		cfg.ServerAddress = ""
+		if cfg != empty {
+			break
+		}
+	}
+	if cfg == empty {
+		return Anonymous, nil
+	}
+
+	return FromConfig(AuthConfig{
+		Username:      cfg.Username,
+		Password:      cfg.Password,
+		Auth:          cfg.Auth,
+		IdentityToken: cfg.IdentityToken,
+		RegistryToken: cfg.RegistryToken,
+	}), nil
+}
+
+// fileExists returns true if the given path exists and is not a directory.
+func fileExists(path string) bool {
+	fi, err := os.Stat(path)
+	return err == nil && !fi.IsDir()
+}
+
+// Helper is a subset of the Docker credential helper credentials.Helper
+// interface used by NewKeychainFromHelper.
+//
+// See:
+// https://pkg.go.dev/github.com/docker/docker-credential-helpers/credentials#Helper
+type Helper interface {
+	Get(serverURL string) (string, string, error)
+}
+
+// NewKeychainFromHelper returns a Keychain based on a Docker credential helper
+// implementation that can Get username and password credentials for a given
+// server URL.
+func NewKeychainFromHelper(h Helper) Keychain { return wrapper{h} }
+
+type wrapper struct{ h Helper }
+
+func (w wrapper) Resolve(r Resource) (Authenticator, error) {
+	u, p, err := w.h.Get(r.RegistryStr())
+	if err != nil {
+		return Anonymous, nil
+	}
+	// If the secret being stored is an identity token, the Username should be set to <token>.
+	// ref: https://docs.docker.com/engine/reference/commandline/login/#credential-helper-protocol
+	if u == "<token>" {
+		return FromConfig(AuthConfig{Username: u, IdentityToken: p}), nil
+	}
+	return FromConfig(AuthConfig{Username: u, Password: p}), nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go b/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go
new file mode 100644
index 00000000..3b1804f5
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go
@@ -0,0 +1,41 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+type multiKeychain struct {
+	keychains []Keychain
+}
+
+// Assert that our multi-keychain implements Keychain.
+var _ (Keychain) = (*multiKeychain)(nil)
+
+// NewMultiKeychain composes a list of keychains into one new keychain.
+func NewMultiKeychain(kcs ...Keychain) Keychain {
+	return &multiKeychain{keychains: kcs}
+}
+
+// Resolve implements Keychain.
+func (mk *multiKeychain) Resolve(target Resource) (Authenticator, error) {
+	for _, kc := range mk.keychains {
+		auth, err := kc.Resolve(target)
+		if err != nil {
+			return nil, err
+		}
+		if auth != Anonymous {
+			return auth, nil
+		}
+	}
+	return Anonymous, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go b/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go
new file mode 100644
index 00000000..6686c2d8
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/compression/compression.go
@@ -0,0 +1,26 @@
+// Copyright 2022 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package compression abstracts over gzip and zstd.
+package compression
+
+// Compression is an enumeration of the supported compression algorithms
+type Compression string
+
+// The collection of known Compression values.
+const (
+	None Compression = "none"
+	GZip Compression = "gzip"
+	ZStd Compression = "zstd"
+)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/append.go b/vendor/github.com/google/go-containerregistry/pkg/crane/append.go
new file mode 100644
index 00000000..f1c2ef69
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/crane/append.go
@@ -0,0 +1,114 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package crane
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/google/go-containerregistry/internal/windows"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/mutate"
+	"github.com/google/go-containerregistry/pkg/v1/stream"
+	"github.com/google/go-containerregistry/pkg/v1/tarball"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+func isWindows(img v1.Image) (bool, error) {
+	cfg, err := img.ConfigFile()
+	if err != nil {
+		return false, err
+	}
+	return cfg != nil && cfg.OS == "windows", nil
+}
+
+// Append reads a layer from path and appends it to the v1.Image base.
+//
+// If the base image is a Windows base image (i.e., its config.OS is
+// "windows"), the contents of the tarballs will be modified to be suitable for
+// a Windows container image.
+func Append(base v1.Image, paths ...string) (v1.Image, error) {
+	if base == nil {
+		return nil, fmt.Errorf("invalid argument: base")
+	}
+
+	win, err := isWindows(base)
+	if err != nil {
+		return nil, fmt.Errorf("getting base image: %w", err)
+	}
+
+	baseMediaType, err := base.MediaType()
+	if err != nil {
+		return nil, fmt.Errorf("getting base image media type: %w", err)
+	}
+
+	layerType := types.DockerLayer
+	if baseMediaType == types.OCIManifestSchema1 {
+		layerType = types.OCILayer
+	}
+
+	layers := make([]v1.Layer, 0, len(paths))
+	for _, path := range paths {
+		layer, err := getLayer(path, layerType)
+		if err != nil {
+			return nil, fmt.Errorf("reading layer %q: %w", path, err)
+		}
+
+		if win {
+			layer, err = windows.Windows(layer)
+			if err != nil {
+				return nil, fmt.Errorf("converting %q for Windows: %w", path, err)
+			}
+		}
+
+		layers = append(layers, layer)
+	}
+
+	return mutate.AppendLayers(base, layers...)
+}
+
+func getLayer(path string, layerType types.MediaType) (v1.Layer, error) {
+	f, err := streamFile(path)
+	if err != nil {
+		return nil, err
+	}
+	if f != nil {
+		return stream.NewLayer(f, stream.WithMediaType(layerType)), nil
+	}
+
+	return tarball.LayerFromFile(path, tarball.WithMediaType(layerType))
+}
+
+// If we're dealing with a named pipe, trying to open it multiple times will
+// fail, so we need to do a streaming upload.
+// +// returns nil, nil for non-streaming files +func streamFile(path string) (*os.File, error) { + if path == "-" { + return os.Stdin, nil + } + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if !fi.Mode().IsRegular() { + return os.Open(path) + } + + return nil, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/catalog.go b/vendor/github.com/google/go-containerregistry/pkg/crane/catalog.go new file mode 100644 index 00000000..f30800cc --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/catalog.go @@ -0,0 +1,35 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +import ( + "context" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +// Catalog returns the repositories in a registry's catalog. +func Catalog(src string, opt ...Option) (res []string, err error) { + o := makeOptions(opt...) + reg, err := name.NewRegistry(src, o.Name...) + if err != nil { + return nil, err + } + + // This context gets overridden by remote.WithContext, which is set by + // crane.WithContext. + return remote.Catalog(context.Background(), reg, o.Remote...) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/config.go b/vendor/github.com/google/go-containerregistry/pkg/crane/config.go new file mode 100644 index 00000000..3e55cc93 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/config.go @@ -0,0 +1,24 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +// Config returns the config file for the remote image ref. +func Config(ref string, opt ...Option) ([]byte, error) { + i, _, err := getImage(ref, opt...) + if err != nil { + return nil, err + } + return i.RawConfigFile() +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/copy.go b/vendor/github.com/google/go-containerregistry/pkg/crane/copy.go new file mode 100644 index 00000000..a606f965 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/copy.go @@ -0,0 +1,88 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +import ( + "fmt" + + "github.com/google/go-containerregistry/internal/legacy" + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Copy copies a remote image or index from src to dst. +func Copy(src, dst string, opt ...Option) error { + o := makeOptions(opt...) + srcRef, err := name.ParseReference(src, o.Name...) + if err != nil { + return fmt.Errorf("parsing reference %q: %w", src, err) + } + + dstRef, err := name.ParseReference(dst, o.Name...) + if err != nil { + return fmt.Errorf("parsing reference for %q: %w", dst, err) + } + + logs.Progress.Printf("Copying from %v to %v", srcRef, dstRef) + desc, err := remote.Get(srcRef, o.Remote...) + if err != nil { + return fmt.Errorf("fetching %q: %w", src, err) + } + + switch desc.MediaType { + case types.OCIImageIndex, types.DockerManifestList: + // Handle indexes separately. + if o.Platform != nil { + // If platform is explicitly set, don't copy the whole index, just the appropriate image. + if err := copyImage(desc, dstRef, o); err != nil { + return fmt.Errorf("failed to copy image: %w", err) + } + } else { + if err := copyIndex(desc, dstRef, o); err != nil { + return fmt.Errorf("failed to copy index: %w", err) + } + } + case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: + // Handle schema 1 images separately. + if err := legacy.CopySchema1(desc, srcRef, dstRef, o.Remote...); err != nil { + return fmt.Errorf("failed to copy schema 1 image: %w", err) + } + default: + // Assume anything else is an image, since some registries don't set mediaTypes properly. + if err := copyImage(desc, dstRef, o); err != nil { + return fmt.Errorf("failed to copy image: %w", err) + } + } + + return nil +} + +func copyImage(desc *remote.Descriptor, dstRef name.Reference, o Options) error { + img, err := desc.Image() + if err != nil { + return err + } + return remote.Write(dstRef, img, o.Remote...) +} + +func copyIndex(desc *remote.Descriptor, dstRef name.Reference, o Options) error { + idx, err := desc.ImageIndex() + if err != nil { + return err + } + return remote.WriteIndex(dstRef, idx, o.Remote...) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/delete.go b/vendor/github.com/google/go-containerregistry/pkg/crane/delete.go new file mode 100644 index 00000000..58a8be1f --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/delete.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +import ( + "fmt" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +// Delete deletes the remote reference at src. +func Delete(src string, opt ...Option) error { + o := makeOptions(opt...) + ref, err := name.ParseReference(src, o.Name...) + if err != nil { + return fmt.Errorf("parsing reference %q: %w", src, err) + } + + return remote.Delete(ref, o.Remote...) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/digest.go b/vendor/github.com/google/go-containerregistry/pkg/crane/digest.go new file mode 100644 index 00000000..868a5701 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/digest.go @@ -0,0 +1,52 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +import "github.com/google/go-containerregistry/pkg/logs" + +// Digest returns the sha256 hash of the remote image at ref. +func Digest(ref string, opt ...Option) (string, error) { + o := makeOptions(opt...) + if o.Platform != nil { + desc, err := getManifest(ref, opt...) + if err != nil { + return "", err + } + if !desc.MediaType.IsIndex() { + return desc.Digest.String(), nil + } + + // TODO: does not work for indexes which contain schema v1 manifests + img, err := desc.Image() + if err != nil { + return "", err + } + digest, err := img.Digest() + if err != nil { + return "", err + } + return digest.String(), nil + } + desc, err := Head(ref, opt...) + if err != nil { + logs.Warn.Printf("HEAD request failed, falling back on GET: %v", err) + rdesc, err := getManifest(ref, opt...) + if err != nil { + return "", err + } + return rdesc.Digest.String(), nil + } + return desc.Digest.String(), nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/doc.go b/vendor/github.com/google/go-containerregistry/pkg/crane/doc.go new file mode 100644 index 00000000..7602d795 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/doc.go @@ -0,0 +1,16 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package crane holds libraries used to implement the crane CLI. 
+package crane diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/export.go b/vendor/github.com/google/go-containerregistry/pkg/crane/export.go new file mode 100644 index 00000000..5d6da1dc --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/export.go @@ -0,0 +1,47 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +import ( + "io" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/mutate" +) + +// Export writes the filesystem contents (as a tarball) of img to w. +// If img has a single layer, just write the (uncompressed) contents to w so +// that this "just works" for images that just wrap a single blob. +func Export(img v1.Image, w io.Writer) error { + layers, err := img.Layers() + if err != nil { + return err + } + if len(layers) == 1 { + // If it's a single layer, we don't have to flatten the filesystem. + // An added perk of skipping mutate.Extract here is that this works + // for non-tarball layers. + l := layers[0] + rc, err := l.Uncompressed() + if err != nil { + return err + } + _, err = io.Copy(w, rc) + return err + } + fs := mutate.Extract(img) + _, err = io.Copy(w, fs) + return err +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/filemap.go b/vendor/github.com/google/go-containerregistry/pkg/crane/filemap.go new file mode 100644 index 00000000..36dfc2a6 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/filemap.go @@ -0,0 +1,72 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +import ( + "archive/tar" + "bytes" + "io" + "sort" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/tarball" +) + +// Layer creates a layer from a single file map. These layers are reproducible and consistent. +// A filemap is a path -> file content map representing a file system. 
+func Layer(filemap map[string][]byte) (v1.Layer, error) {
+	b := &bytes.Buffer{}
+	w := tar.NewWriter(b)
+
+	fn := []string{}
+	for f := range filemap {
+		fn = append(fn, f)
+	}
+	sort.Strings(fn)
+
+	for _, f := range fn {
+		c := filemap[f]
+		if err := w.WriteHeader(&tar.Header{
+			Name: f,
+			Size: int64(len(c)),
+		}); err != nil {
+			return nil, err
+		}
+		if _, err := w.Write(c); err != nil {
+			return nil, err
+		}
+	}
+	if err := w.Close(); err != nil {
+		return nil, err
+	}
+
+	// Return a new copy of the buffer each time it's opened.
+	return tarball.LayerFromOpener(func() (io.ReadCloser, error) {
+		return io.NopCloser(bytes.NewBuffer(b.Bytes())), nil
+	})
+}
+
+// Image creates an image with the given filemaps as its contents. These images are reproducible and consistent.
+// A filemap is a path -> file content map representing a file system.
+func Image(filemap map[string][]byte) (v1.Image, error) {
+	y, err := Layer(filemap)
+	if err != nil {
+		return nil, err
+	}
+
+	return mutate.AppendLayers(empty.Image, y)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/get.go b/vendor/github.com/google/go-containerregistry/pkg/crane/get.go
new file mode 100644
index 00000000..1f12f01d
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/crane/get.go
@@ -0,0 +1,56 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package crane
+
+import (
+	"fmt"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+)
+
+func getImage(r string, opt ...Option) (v1.Image, name.Reference, error) {
+	o := makeOptions(opt...)
+	ref, err := name.ParseReference(r, o.Name...)
+	if err != nil {
+		return nil, nil, fmt.Errorf("parsing reference %q: %w", r, err)
+	}
+	img, err := remote.Image(ref, o.Remote...)
+	if err != nil {
+		return nil, nil, fmt.Errorf("reading image %q: %w", ref, err)
+	}
+	return img, ref, nil
+}
+
+func getManifest(r string, opt ...Option) (*remote.Descriptor, error) {
+	o := makeOptions(opt...)
+	ref, err := name.ParseReference(r, o.Name...)
+	if err != nil {
+		return nil, fmt.Errorf("parsing reference %q: %w", r, err)
+	}
+	return remote.Get(ref, o.Remote...)
+}
+
+// Head performs a HEAD request for a manifest and returns a content descriptor
+// based on the registry's response.
+func Head(r string, opt ...Option) (*v1.Descriptor, error) {
+	o := makeOptions(opt...)
+	ref, err := name.ParseReference(r, o.Name...)
+	if err != nil {
+		return nil, err
+	}
+	return remote.Head(ref, o.Remote...)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/list.go b/vendor/github.com/google/go-containerregistry/pkg/crane/list.go
new file mode 100644
index 00000000..38352153
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/crane/list.go
@@ -0,0 +1,33 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +import ( + "fmt" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +// ListTags returns the tags in repository src. +func ListTags(src string, opt ...Option) ([]string, error) { + o := makeOptions(opt...) + repo, err := name.NewRepository(src, o.Name...) + if err != nil { + return nil, fmt.Errorf("parsing repo %q: %w", src, err) + } + + return remote.List(repo, o.Remote...) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/crane/manifest.go new file mode 100644 index 00000000..a54926ae --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/manifest.go @@ -0,0 +1,32 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +// Manifest returns the manifest for the remote image or index ref. +func Manifest(ref string, opt ...Option) ([]byte, error) { + desc, err := getManifest(ref, opt...) + if err != nil { + return nil, err + } + o := makeOptions(opt...) + if o.Platform != nil { + img, err := desc.Image() + if err != nil { + return nil, err + } + return img.RawManifest() + } + return desc.Manifest, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/optimize.go b/vendor/github.com/google/go-containerregistry/pkg/crane/optimize.go new file mode 100644 index 00000000..74c665df --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/optimize.go @@ -0,0 +1,237 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package crane + +import ( + "errors" + "fmt" + + "github.com/containerd/stargz-snapshotter/estargz" + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Optimize optimizes a remote image or index from src to dst. +// THIS API IS EXPERIMENTAL AND SUBJECT TO CHANGE WITHOUT WARNING. +func Optimize(src, dst string, prioritize []string, opt ...Option) error { + pset := newStringSet(prioritize) + o := makeOptions(opt...) + srcRef, err := name.ParseReference(src, o.Name...) + if err != nil { + return fmt.Errorf("parsing reference %q: %w", src, err) + } + + dstRef, err := name.ParseReference(dst, o.Name...) + if err != nil { + return fmt.Errorf("parsing reference for %q: %w", dst, err) + } + + logs.Progress.Printf("Optimizing from %v to %v", srcRef, dstRef) + desc, err := remote.Get(srcRef, o.Remote...) + if err != nil { + return fmt.Errorf("fetching %q: %w", src, err) + } + + switch desc.MediaType { + case types.OCIImageIndex, types.DockerManifestList: + // Handle indexes separately. + if o.Platform != nil { + // If platform is explicitly set, don't optimize the whole index, just the appropriate image. + if err := optimizeAndPushImage(desc, dstRef, pset, o); err != nil { + return fmt.Errorf("failed to optimize image: %w", err) + } + } else { + if err := optimizeAndPushIndex(desc, dstRef, pset, o); err != nil { + return fmt.Errorf("failed to optimize index: %w", err) + } + } + + case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: + return errors.New("docker schema 1 images are not supported") + + default: + // Assume anything else is an image, since some registries don't set mediaTypes properly. + if err := optimizeAndPushImage(desc, dstRef, pset, o); err != nil { + return fmt.Errorf("failed to optimize image: %w", err) + } + } + + return nil +} + +func optimizeAndPushImage(desc *remote.Descriptor, dstRef name.Reference, prioritize stringSet, o Options) error { + img, err := desc.Image() + if err != nil { + return err + } + + missing, oimg, err := optimizeImage(img, prioritize) + if err != nil { + return err + } + + if len(missing) > 0 { + return fmt.Errorf("the following prioritized files were missing from image: %v", missing.List()) + } + + return remote.Write(dstRef, oimg, o.Remote...) 
+} + +func optimizeImage(img v1.Image, prioritize stringSet) (stringSet, v1.Image, error) { + cfg, err := img.ConfigFile() + if err != nil { + return nil, nil, err + } + ocfg := cfg.DeepCopy() + ocfg.History = nil + ocfg.RootFS.DiffIDs = nil + + oimg, err := mutate.ConfigFile(empty.Image, ocfg) + if err != nil { + return nil, nil, err + } + + layers, err := img.Layers() + if err != nil { + return nil, nil, err + } + + missingFromImage := newStringSet(prioritize.List()) + olayers := make([]mutate.Addendum, 0, len(layers)) + for _, layer := range layers { + missingFromLayer := []string{} + olayer, err := tarball.LayerFromOpener(layer.Uncompressed, + tarball.WithEstargz, + tarball.WithEstargzOptions( + estargz.WithPrioritizedFiles(prioritize.List()), + estargz.WithAllowPrioritizeNotFound(&missingFromLayer), + )) + if err != nil { + return nil, nil, err + } + missingFromImage = missingFromImage.Intersection(newStringSet(missingFromLayer)) + + olayers = append(olayers, mutate.Addendum{ + Layer: olayer, + MediaType: types.DockerLayer, + }) + } + + oimg, err = mutate.Append(oimg, olayers...) + if err != nil { + return nil, nil, err + } + return missingFromImage, oimg, nil +} + +func optimizeAndPushIndex(desc *remote.Descriptor, dstRef name.Reference, prioritize stringSet, o Options) error { + idx, err := desc.ImageIndex() + if err != nil { + return err + } + + missing, oidx, err := optimizeIndex(idx, prioritize) + if err != nil { + return err + } + + if len(missing) > 0 { + return fmt.Errorf("the following prioritized files were missing from all images: %v", missing.List()) + } + + return remote.WriteIndex(dstRef, oidx, o.Remote...) +} + +func optimizeIndex(idx v1.ImageIndex, prioritize stringSet) (stringSet, v1.ImageIndex, error) { + im, err := idx.IndexManifest() + if err != nil { + return nil, nil, err + } + + missingFromIndex := newStringSet(prioritize.List()) + + // Build an image for each child from the base and append it to a new index to produce the result. 
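+	// A prioritized file is only reported as missing if it is absent from
+	// every child image, hence the Intersection of the per-image results below.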
+ adds := make([]mutate.IndexAddendum, 0, len(im.Manifests)) + for _, desc := range im.Manifests { + img, err := idx.Image(desc.Digest) + if err != nil { + return nil, nil, err + } + + missingFromImage, oimg, err := optimizeImage(img, prioritize) + if err != nil { + return nil, nil, err + } + missingFromIndex = missingFromIndex.Intersection(missingFromImage) + adds = append(adds, mutate.IndexAddendum{ + Add: oimg, + Descriptor: v1.Descriptor{ + URLs: desc.URLs, + MediaType: desc.MediaType, + Annotations: desc.Annotations, + Platform: desc.Platform, + }, + }) + } + + idxType, err := idx.MediaType() + if err != nil { + return nil, nil, err + } + + return missingFromIndex, mutate.IndexMediaType(mutate.AppendManifests(empty.Index, adds...), idxType), nil +} + +type stringSet map[string]struct{} + +func newStringSet(in []string) stringSet { + ss := stringSet{} + for _, s := range in { + ss[s] = struct{}{} + } + return ss +} + +func (s stringSet) List() []string { + result := make([]string, 0, len(s)) + for k := range s { + result = append(result, k) + } + return result +} + +func (s stringSet) Intersection(rhs stringSet) stringSet { + // To appease ST1016 + lhs := s + + // Make sure len(lhs) >= len(rhs) + if len(lhs) < len(rhs) { + return rhs.Intersection(lhs) + } + + result := stringSet{} + for k := range lhs { + if _, ok := rhs[k]; ok { + result[k] = struct{}{} + } + } + return result +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/options.go b/vendor/github.com/google/go-containerregistry/pkg/crane/options.go new file mode 100644 index 00000000..2e992789 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/options.go @@ -0,0 +1,127 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +import ( + "context" + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +// Options hold the options that crane uses when calling other packages. +type Options struct { + Name []name.Option + Remote []remote.Option + Platform *v1.Platform + Keychain authn.Keychain +} + +// GetOptions exposes the underlying []remote.Option, []name.Option, and +// platform, based on the passed Option. Generally, you shouldn't need to use +// this unless you've painted yourself into a dependency corner as we have +// with the crane and gcrane cli packages. +func GetOptions(opts ...Option) Options { + return makeOptions(opts...) +} + +func makeOptions(opts ...Option) Options { + opt := Options{ + Remote: []remote.Option{ + remote.WithAuthFromKeychain(authn.DefaultKeychain), + }, + Keychain: authn.DefaultKeychain, + } + for _, o := range opts { + o(&opt) + } + return opt +} + +// Option is a functional option for crane. 
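+//
+// Options compose variadically, e.g. (a sketch; the reference is illustrative):
+//
+//	img, err := Pull("registry.example.com/repo:tag", WithUserAgent("my-tool"), Insecure)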
+type Option func(*Options) + +// WithTransport is a functional option for overriding the default transport +// for remote operations. +func WithTransport(t http.RoundTripper) Option { + return func(o *Options) { + o.Remote = append(o.Remote, remote.WithTransport(t)) + } +} + +// Insecure is an Option that allows image references to be fetched without TLS. +func Insecure(o *Options) { + o.Name = append(o.Name, name.Insecure) +} + +// WithPlatform is an Option to specify the platform. +func WithPlatform(platform *v1.Platform) Option { + return func(o *Options) { + if platform != nil { + o.Remote = append(o.Remote, remote.WithPlatform(*platform)) + } + o.Platform = platform + } +} + +// WithAuthFromKeychain is a functional option for overriding the default +// authenticator for remote operations, using an authn.Keychain to find +// credentials. +// +// By default, crane will use authn.DefaultKeychain. +func WithAuthFromKeychain(keys authn.Keychain) Option { + return func(o *Options) { + // Replace the default keychain at position 0. + o.Remote[0] = remote.WithAuthFromKeychain(keys) + o.Keychain = keys + } +} + +// WithAuth is a functional option for overriding the default authenticator +// for remote operations. +// +// By default, crane will use authn.DefaultKeychain. +func WithAuth(auth authn.Authenticator) Option { + return func(o *Options) { + // Replace the default keychain at position 0. + o.Remote[0] = remote.WithAuth(auth) + } +} + +// WithUserAgent adds the given string to the User-Agent header for any HTTP +// requests. +func WithUserAgent(ua string) Option { + return func(o *Options) { + o.Remote = append(o.Remote, remote.WithUserAgent(ua)) + } +} + +// WithNondistributable is an option that allows pushing non-distributable +// layers. +func WithNondistributable() Option { + return func(o *Options) { + o.Remote = append(o.Remote, remote.WithNondistributable) + } +} + +// WithContext is a functional option for setting the context. +func WithContext(ctx context.Context) Option { + return func(o *Options) { + o.Remote = append(o.Remote, remote.WithContext(ctx)) + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/pull.go b/vendor/github.com/google/go-containerregistry/pkg/crane/pull.go new file mode 100644 index 00000000..7e6e5b7b --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/pull.go @@ -0,0 +1,142 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +import ( + "fmt" + "os" + + legacy "github.com/google/go-containerregistry/pkg/legacy/tarball" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/layout" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/tarball" +) + +// Tag applied to images that were pulled by digest. 
This denotes that the
+// image was (probably) never tagged with this, but lets us avoid applying the
+// ":latest" tag which might be misleading.
+const iWasADigestTag = "i-was-a-digest"
+
+// Pull returns a v1.Image of the remote image src.
+func Pull(src string, opt ...Option) (v1.Image, error) {
+	o := makeOptions(opt...)
+	ref, err := name.ParseReference(src, o.Name...)
+	if err != nil {
+		return nil, fmt.Errorf("parsing reference %q: %w", src, err)
+	}
+
+	return remote.Image(ref, o.Remote...)
+}
+
+// Save writes the v1.Image img as a tarball at path with tag src.
+func Save(img v1.Image, src, path string) error {
+	imgMap := map[string]v1.Image{src: img}
+	return MultiSave(imgMap, path)
+}
+
+// MultiSave writes a collection of v1.Images, keyed by tag, as a tarball.
+func MultiSave(imgMap map[string]v1.Image, path string, opt ...Option) error {
+	o := makeOptions(opt...)
+	tagToImage := map[name.Tag]v1.Image{}
+
+	for src, img := range imgMap {
+		ref, err := name.ParseReference(src, o.Name...)
+		if err != nil {
+			return fmt.Errorf("parsing ref %q: %w", src, err)
+		}
+
+		// WriteToFile wants a tag to write to the tarball, but we might have
+		// been given a digest.
+		// If the original ref was a tag, use that. Otherwise, if it was a
+		// digest, tag the image with :i-was-a-digest instead.
+		tag, ok := ref.(name.Tag)
+		if !ok {
+			d, ok := ref.(name.Digest)
+			if !ok {
+				return fmt.Errorf("ref wasn't a tag or digest")
+			}
+			tag = d.Repository.Tag(iWasADigestTag)
+		}
+		tagToImage[tag] = img
+	}
+	// no progress channel (for now)
+	return tarball.MultiWriteToFile(path, tagToImage)
+}
+
+// PullLayer returns the given layer from a registry.
+func PullLayer(ref string, opt ...Option) (v1.Layer, error) {
+	o := makeOptions(opt...)
+	digest, err := name.NewDigest(ref, o.Name...)
+	if err != nil {
+		return nil, err
+	}
+
+	return remote.Layer(digest, o.Remote...)
+}
+
+// SaveLegacy writes the v1.Image img as a legacy tarball at path with tag src.
+func SaveLegacy(img v1.Image, src, path string) error {
+	imgMap := map[string]v1.Image{src: img}
+	return MultiSaveLegacy(imgMap, path)
+}
+
+// MultiSaveLegacy writes a collection of v1.Images, keyed by tag, as a legacy tarball.
+func MultiSaveLegacy(imgMap map[string]v1.Image, path string) error {
+	refToImage := map[name.Reference]v1.Image{}
+
+	for src, img := range imgMap {
+		ref, err := name.ParseReference(src)
+		if err != nil {
+			return fmt.Errorf("parsing ref %q: %w", src, err)
+		}
+		refToImage[ref] = img
+	}
+
+	w, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer w.Close()
+
+	return legacy.MultiWrite(refToImage, w)
+}
+
+// SaveOCI writes the v1.Image img as an OCI Image Layout at path. If a layout
+// already exists at that path, it will add the image to the index.
+func SaveOCI(img v1.Image, path string) error {
+	imgMap := map[string]v1.Image{"": img}
+	return MultiSaveOCI(imgMap, path)
+}
+
+// MultiSaveOCI writes a collection of v1.Images as an OCI Image Layout at path. If a layout
+// already exists at that path, it will add the images to the index.
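+//
+// For example (path illustrative):
+//
+//	err := MultiSaveOCI(map[string]v1.Image{"": img}, "./oci-layout")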
+func MultiSaveOCI(imgMap map[string]v1.Image, path string) error { + p, err := layout.FromPath(path) + if err != nil { + p, err = layout.Write(path, empty.Index) + if err != nil { + return err + } + } + for _, img := range imgMap { + if err = p.AppendImage(img); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/push.go b/vendor/github.com/google/go-containerregistry/pkg/crane/push.go new file mode 100644 index 00000000..6d1fbd6c --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/push.go @@ -0,0 +1,65 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +import ( + "fmt" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/tarball" +) + +// Load reads the tarball at path as a v1.Image. +func Load(path string, opt ...Option) (v1.Image, error) { + return LoadTag(path, "") +} + +// LoadTag reads a tag from the tarball at path as a v1.Image. +// If tag is "", will attempt to read the tarball as a single image. +func LoadTag(path, tag string, opt ...Option) (v1.Image, error) { + if tag == "" { + return tarball.ImageFromPath(path, nil) + } + + o := makeOptions(opt...) + t, err := name.NewTag(tag, o.Name...) + if err != nil { + return nil, fmt.Errorf("parsing tag %q: %w", tag, err) + } + return tarball.ImageFromPath(path, &t) +} + +// Push pushes the v1.Image img to a registry as dst. +func Push(img v1.Image, dst string, opt ...Option) error { + o := makeOptions(opt...) + tag, err := name.ParseReference(dst, o.Name...) + if err != nil { + return fmt.Errorf("parsing reference %q: %w", dst, err) + } + return remote.Write(tag, img, o.Remote...) +} + +// Upload pushes the v1.Layer to a given repo. +func Upload(layer v1.Layer, repo string, opt ...Option) error { + o := makeOptions(opt...) + ref, err := name.NewRepository(repo, o.Name...) + if err != nil { + return fmt.Errorf("parsing repo %q: %w", repo, err) + } + + return remote.WriteLayer(ref, layer, o.Remote...) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/crane/tag.go b/vendor/github.com/google/go-containerregistry/pkg/crane/tag.go new file mode 100644 index 00000000..13bc3958 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/crane/tag.go @@ -0,0 +1,39 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package crane + +import ( + "fmt" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +// Tag adds tag to the remote img. +func Tag(img, tag string, opt ...Option) error { + o := makeOptions(opt...) + ref, err := name.ParseReference(img, o.Name...) + if err != nil { + return fmt.Errorf("parsing reference %q: %w", img, err) + } + desc, err := remote.Get(ref, o.Remote...) + if err != nil { + return fmt.Errorf("fetching %q: %w", img, err) + } + + dst := ref.Context().Tag(tag) + + return remote.Tag(dst, desc, o.Remote...) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/legacy/config.go b/vendor/github.com/google/go-containerregistry/pkg/legacy/config.go new file mode 100644 index 00000000..3364bec6 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/legacy/config.go @@ -0,0 +1,33 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package legacy + +import ( + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +// LayerConfigFile is the configuration file that holds the metadata describing +// a v1 layer. See: +// https://github.com/moby/moby/blob/master/image/spec/v1.md +type LayerConfigFile struct { + v1.ConfigFile + + ContainerConfig v1.Config `json:"container_config,omitempty"` + + ID string `json:"id,omitempty"` + Parent string `json:"parent,omitempty"` + Throwaway bool `json:"throwaway,omitempty"` + Comment string `json:"comment,omitempty"` +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/legacy/doc.go b/vendor/github.com/google/go-containerregistry/pkg/legacy/doc.go new file mode 100644 index 00000000..1d166888 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/legacy/doc.go @@ -0,0 +1,18 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package legacy provides functionality to work with docker images in the v1 +// format. 
+// See: https://github.com/moby/moby/blob/master/image/spec/v1.md +package legacy diff --git a/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/README.md b/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/README.md new file mode 100644 index 00000000..90b88c75 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/README.md @@ -0,0 +1,6 @@ +# `legacy/tarball` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/legacy/tarball?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/legacy/tarball) + +This package implements support for writing legacy tarballs, as described +[here](https://github.com/moby/moby/blob/749d90e10f989802638ae542daf54257f3bf71f2/image/spec/v1.2.md#combined-image-json--filesystem-changeset-format). diff --git a/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/doc.go b/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/doc.go new file mode 100644 index 00000000..62684d6e --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/doc.go @@ -0,0 +1,18 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tarball provides facilities for writing v1 docker images +// (https://github.com/moby/moby/blob/master/image/spec/v1.md) from/to a tarball +// on-disk. +package tarball diff --git a/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/write.go b/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/write.go new file mode 100644 index 00000000..2a2b73c1 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/legacy/tarball/write.go @@ -0,0 +1,375 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tarball + +import ( + "archive/tar" + "bytes" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "sort" + "strings" + + "github.com/google/go-containerregistry/pkg/legacy" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/tarball" +) + +// repositoriesTarDescriptor represents the repositories file inside a `docker save` tarball. 
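+// It maps an image name to a tag-to-top-layer-ID map, e.g. (illustrative):
+//
+//	{"ubuntu": {"latest": "<top layer id>"}}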
+type repositoriesTarDescriptor map[string]map[string]string
+
+// v1Layer represents a layer with metadata needed by the v1 image spec https://github.com/moby/moby/blob/master/image/spec/v1.md.
+type v1Layer struct {
+	// config is the layer metadata.
+	config *legacy.LayerConfigFile
+	// layer is the v1.Layer object this v1Layer represents.
+	layer v1.Layer
+}
+
+// json returns the raw bytes of the json metadata of the given v1Layer.
+func (l *v1Layer) json() ([]byte, error) {
+	return json.Marshal(l.config)
+}
+
+// version returns the raw bytes of the "VERSION" file of the given v1Layer.
+func (l *v1Layer) version() []byte {
+	return []byte("1.0")
+}
+
+// v1LayerID computes the v1 image format layer id for the given v1.Layer with the given v1 parent ID and raw image config.
+func v1LayerID(layer v1.Layer, parentID string, rawConfig []byte) (string, error) {
+	d, err := layer.Digest()
+	if err != nil {
+		return "", fmt.Errorf("unable to get layer digest to generate v1 layer ID: %w", err)
+	}
+	s := fmt.Sprintf("%s %s", d.Hex, parentID)
+	if len(rawConfig) != 0 {
+		s = fmt.Sprintf("%s %s", s, string(rawConfig))
+	}
+	rawDigest := sha256.Sum256([]byte(s))
+	return hex.EncodeToString(rawDigest[:]), nil
+}
+
+// newV1Layer creates a new v1Layer for a layer other than the top layer in a v1 image tarball.
+func newV1Layer(layer v1.Layer, parent *v1Layer, history v1.History) (*v1Layer, error) {
+	parentID := ""
+	if parent != nil {
+		parentID = parent.config.ID
+	}
+	id, err := v1LayerID(layer, parentID, nil)
+	if err != nil {
+		return nil, fmt.Errorf("unable to generate v1 layer ID: %w", err)
+	}
+	result := &v1Layer{
+		layer: layer,
+		config: &legacy.LayerConfigFile{
+			ConfigFile: v1.ConfigFile{
+				Created: history.Created,
+				Author:  history.Author,
+			},
+			ContainerConfig: v1.Config{
+				Cmd: []string{history.CreatedBy},
+			},
+			ID:        id,
+			Parent:    parentID,
+			Throwaway: history.EmptyLayer,
+			Comment:   history.Comment,
+		},
+	}
+	return result, nil
+}
+
+// newTopV1Layer creates a new v1Layer for the top layer in a v1 image tarball.
+func newTopV1Layer(layer v1.Layer, parent *v1Layer, history v1.History, imgConfig *v1.ConfigFile, rawConfig []byte) (*v1Layer, error) {
+	result, err := newV1Layer(layer, parent, history)
+	if err != nil {
+		return nil, err
+	}
+	id, err := v1LayerID(layer, result.config.Parent, rawConfig)
+	if err != nil {
+		return nil, fmt.Errorf("unable to generate v1 layer ID for top layer: %w", err)
+	}
+	result.config.ID = id
+	result.config.Architecture = imgConfig.Architecture
+	result.config.Container = imgConfig.Container
+	result.config.DockerVersion = imgConfig.DockerVersion
+	result.config.OS = imgConfig.OS
+	result.config.Config = imgConfig.Config
+	result.config.Created = imgConfig.Created
+	return result, nil
+}
+
+// splitTag splits the given tagged image name <registry>/<repo>:<tag>
+// into <registry>/<repo> and <tag>.
+func splitTag(name string) (string, string) {
+	// Split on ":"
+	parts := strings.Split(name, ":")
+	// Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation.
+	if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], "/") {
+		base := strings.Join(parts[:len(parts)-1], ":")
+		tag := parts[len(parts)-1]
+		return base, tag
+	}
+	return name, ""
+}
+
+// addTags adds the given image tags to the given "repositories" file descriptor in a v1 image tarball.
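+// For example, the tag "example.com/app:v1" becomes repos["example.com/app"]["v1"] = topLayerID.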
+func addTags(repos repositoriesTarDescriptor, tags []string, topLayerID string) {
+	for _, t := range tags {
+		base, tag := splitTag(t)
+		tagToID, ok := repos[base]
+		if !ok {
+			tagToID = make(map[string]string)
+			repos[base] = tagToID
+		}
+		tagToID[tag] = topLayerID
+	}
+}
+
+// updateLayerSources updates the given layer digest to descriptor map with the descriptor of the given layer in the given image if it's an undistributable layer.
+func updateLayerSources(layerSources map[v1.Hash]v1.Descriptor, layer v1.Layer, img v1.Image) error {
+	d, err := layer.Digest()
+	if err != nil {
+		return err
+	}
+	// Add to LayerSources if it's a foreign layer.
+	desc, err := partial.BlobDescriptor(img, d)
+	if err != nil {
+		return err
+	}
+	if !desc.MediaType.IsDistributable() {
+		diffid, err := partial.BlobToDiffID(img, d)
+		if err != nil {
+			return err
+		}
+		layerSources[diffid] = *desc
+	}
+	return nil
+}
+
+// Write is a wrapper to write a single image in V1 format and tag to a tarball.
+func Write(ref name.Reference, img v1.Image, w io.Writer) error {
+	return MultiWrite(map[name.Reference]v1.Image{ref: img}, w)
+}
+
+// filterEmpty filters out the history corresponding to empty layers from the
+// given history.
+func filterEmpty(h []v1.History) []v1.History {
+	result := []v1.History{}
+	for _, i := range h {
+		if i.EmptyLayer {
+			continue
+		}
+		result = append(result, i)
+	}
+	return result
+}
+
+// MultiWrite writes the contents of each image to the provided writer, in the V1 image tarball format.
+// The contents are written in the following format:
+// One manifest.json file at the top level containing information about several images.
+// One repositories file mapping from the image <registry>/<repo name> to the id of the top most layer.
+// For every layer, a directory named with the layer ID is created with the following contents:
+//
+//	layer.tar - The uncompressed layer tarball.
+//	<layer id>.json - Layer metadata json.
+//	VERSION - Schema version string. Always set to "1.0".
+//
+// One file for the config blob, named after its SHA.
+func MultiWrite(refToImage map[name.Reference]v1.Image, w io.Writer) error {
+	tf := tar.NewWriter(w)
+	defer tf.Close()
+
+	sortedImages, imageToTags := dedupRefToImage(refToImage)
+	var m tarball.Manifest
+	repos := make(repositoriesTarDescriptor)
+
+	seenLayerIDs := make(map[string]struct{})
+	for _, img := range sortedImages {
+		tags := imageToTags[img]
+
+		// Write the config.
+		cfgName, err := img.ConfigName()
+		if err != nil {
+			return err
+		}
+		cfgFileName := fmt.Sprintf("%s.json", cfgName.Hex)
+		cfgBlob, err := img.RawConfigFile()
+		if err != nil {
+			return err
+		}
+		if err := writeTarEntry(tf, cfgFileName, bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil {
+			return err
+		}
+		cfg, err := img.ConfigFile()
+		if err != nil {
+			return err
+		}
+
+		// Store foreign layer info.
+		layerSources := make(map[v1.Hash]v1.Descriptor)
+
+		// Write the layers.
+		layers, err := img.Layers()
+		if err != nil {
+			return err
+		}
+		history := filterEmpty(cfg.History)
+		// Create a blank config history if the config didn't have a history.
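+		// The per-layer loop below requires exactly one history entry per layer.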
+ if len(history) == 0 && len(layers) != 0 { + history = make([]v1.History, len(layers)) + } else if len(layers) != len(history) { + return fmt.Errorf("image config had layer history which did not match the number of layers, got len(history)=%d, len(layers)=%d, want len(history)=len(layers)", len(history), len(layers)) + } + layerFiles := make([]string, len(layers)) + var prev *v1Layer + for i, l := range layers { + if err := updateLayerSources(layerSources, l, img); err != nil { + return fmt.Errorf("unable to update image metadata to include undistributable layer source information: %w", err) + } + var cur *v1Layer + if i < (len(layers) - 1) { + cur, err = newV1Layer(l, prev, history[i]) + } else { + cur, err = newTopV1Layer(l, prev, history[i], cfg, cfgBlob) + } + if err != nil { + return err + } + layerFiles[i] = fmt.Sprintf("%s/layer.tar", cur.config.ID) + if _, ok := seenLayerIDs[cur.config.ID]; ok { + prev = cur + continue + } + seenLayerIDs[cur.config.ID] = struct{}{} + + // If the v1.Layer implements UncompressedSize efficiently, use that + // for the tar header. Otherwise, this iterates over Uncompressed(). + // NOTE: If using a streaming layer, this may consume the layer. + size, err := partial.UncompressedSize(l) + if err != nil { + return err + } + u, err := l.Uncompressed() + if err != nil { + return err + } + defer u.Close() + if err := writeTarEntry(tf, layerFiles[i], u, size); err != nil { + return err + } + + j, err := cur.json() + if err != nil { + return err + } + if err := writeTarEntry(tf, fmt.Sprintf("%s/json", cur.config.ID), bytes.NewReader(j), int64(len(j))); err != nil { + return err + } + v := cur.version() + if err := writeTarEntry(tf, fmt.Sprintf("%s/VERSION", cur.config.ID), bytes.NewReader(v), int64(len(v))); err != nil { + return err + } + prev = cur + } + + // Generate the tar descriptor and write it. + m = append(m, tarball.Descriptor{ + Config: cfgFileName, + RepoTags: tags, + Layers: layerFiles, + LayerSources: layerSources, + }) + // prev should be the top layer here. Use it to add the image tags + // to the tarball repositories file. 
+ addTags(repos, tags, prev.config.ID) + } + + mBytes, err := json.Marshal(m) + if err != nil { + return err + } + + if err := writeTarEntry(tf, "manifest.json", bytes.NewReader(mBytes), int64(len(mBytes))); err != nil { + return err + } + reposBytes, err := json.Marshal(&repos) + if err != nil { + return err + } + if err := writeTarEntry(tf, "repositories", bytes.NewReader(reposBytes), int64(len(reposBytes))); err != nil { + return err + } + return nil +} + +func dedupRefToImage(refToImage map[name.Reference]v1.Image) ([]v1.Image, map[v1.Image][]string) { + imageToTags := make(map[v1.Image][]string) + + for ref, img := range refToImage { + if tag, ok := ref.(name.Tag); ok { + if tags, ok := imageToTags[img]; ok && tags != nil { + imageToTags[img] = append(tags, tag.String()) + } else { + imageToTags[img] = []string{tag.String()} + } + } else { + if _, ok := imageToTags[img]; !ok { + imageToTags[img] = nil + } + } + } + + // Force specific order on tags + imgs := []v1.Image{} + for img, tags := range imageToTags { + sort.Strings(tags) + imgs = append(imgs, img) + } + + sort.Slice(imgs, func(i, j int) bool { + cfI, err := imgs[i].ConfigName() + if err != nil { + return false + } + cfJ, err := imgs[j].ConfigName() + if err != nil { + return false + } + return cfI.Hex < cfJ.Hex + }) + + return imgs, imageToTags +} + +// Writes a file to the provided writer with a corresponding tar header +func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { + hdr := &tar.Header{ + Mode: 0644, + Typeflag: tar.TypeReg, + Size: size, + Name: path, + } + if err := tf.WriteHeader(hdr); err != nil { + return err + } + _, err := io.Copy(tf, r) + return err +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go b/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go new file mode 100644 index 00000000..a5d25b18 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/logs/logs.go @@ -0,0 +1,39 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package logs exposes the loggers used by this library. +package logs + +import ( + "io" + "log" +) + +var ( + // Warn is used to log non-fatal errors. + Warn = log.New(io.Discard, "", log.LstdFlags) + + // Progress is used to log notable, successful events. + Progress = log.New(io.Discard, "", log.LstdFlags) + + // Debug is used to log information that is useful for debugging. + Debug = log.New(io.Discard, "", log.LstdFlags) +) + +// Enabled checks to see if the logger's writer is set to something other +// than io.Discard. This allows callers to avoid expensive operations +// that will end up in /dev/null anyway. 
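+//
+// For example (a sketch):
+//
+//	logs.Warn.SetOutput(os.Stderr)
+//	if logs.Enabled(logs.Warn) {
+//		logs.Warn.Println("something non-fatal happened")
+//	}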
+func Enabled(l *log.Logger) bool {
+	return l.Writer() != io.Discard
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/README.md b/vendor/github.com/google/go-containerregistry/pkg/name/README.md
new file mode 100644
index 00000000..4889b844
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/README.md
@@ -0,0 +1,3 @@
+# `name`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/name?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/name)
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/check.go b/vendor/github.com/google/go-containerregistry/pkg/name/check.go
new file mode 100644
index 00000000..e9a240a3
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/check.go
@@ -0,0 +1,43 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+	"strings"
+	"unicode/utf8"
+)
+
+// stripRunesFn returns a function which returns -1 (i.e. a value which
+// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise.
+func stripRunesFn(runes string) func(rune) rune {
+	return func(r rune) rune {
+		if strings.ContainsRune(runes, r) {
+			return -1
+		}
+		return r
+	}
+}
+
+// checkElement checks that a given named element matches character and length restrictions.
+// Returns nil if the given element adheres to the given restrictions, an error otherwise.
+func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error {
+	numRunes := utf8.RuneCountInString(element)
+	if (numRunes < minRunes) || (maxRunes < numRunes) {
+		return newErrBadName("%s must be between %d and %d characters in length: %s", name, minRunes, maxRunes, element)
+	} else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 {
+		return newErrBadName("%s can only contain the characters `%s`: %s", name, allowedRunes, element)
+	}
+	return nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
new file mode 100644
index 00000000..c4a2e693
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go
@@ -0,0 +1,93 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+	_ "crypto/sha256" // Recommended by go-digest.
+ "strings" + + "github.com/opencontainers/go-digest" +) + +const digestDelim = "@" + +// Digest stores a digest name in a structured form. +type Digest struct { + Repository + digest string + original string +} + +// Ensure Digest implements Reference +var _ Reference = (*Digest)(nil) + +// Context implements Reference. +func (d Digest) Context() Repository { + return d.Repository +} + +// Identifier implements Reference. +func (d Digest) Identifier() string { + return d.DigestStr() +} + +// DigestStr returns the digest component of the Digest. +func (d Digest) DigestStr() string { + return d.digest +} + +// Name returns the name from which the Digest was derived. +func (d Digest) Name() string { + return d.Repository.Name() + digestDelim + d.DigestStr() +} + +// String returns the original input string. +func (d Digest) String() string { + return d.original +} + +// NewDigest returns a new Digest representing the given name. +func NewDigest(name string, opts ...Option) (Digest, error) { + // Split on "@" + parts := strings.Split(name, digestDelim) + if len(parts) != 2 { + return Digest{}, newErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name) + } + base := parts[0] + dig := parts[1] + prefix := digest.Canonical.String() + ":" + if !strings.HasPrefix(dig, prefix) { + return Digest{}, newErrBadName("unsupported digest algorithm: %s", dig) + } + hex := strings.TrimPrefix(dig, prefix) + if err := digest.Canonical.Validate(hex); err != nil { + return Digest{}, err + } + + tag, err := NewTag(base, opts...) + if err == nil { + base = tag.Repository.Name() + } + + repo, err := NewRepository(base, opts...) + if err != nil { + return Digest{}, err + } + return Digest{ + Repository: repo, + digest: dig, + original: name, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/doc.go b/vendor/github.com/google/go-containerregistry/pkg/name/doc.go new file mode 100644 index 00000000..b294794d --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/doc.go @@ -0,0 +1,42 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package name defines structured types for representing image references. +// +// What's in a name? For image references, not nearly enough! +// +// Image references look a lot like URLs, but they differ in that they don't +// contain the scheme (http or https), they can end with a :tag or a @digest +// (the latter being validated), and they perform defaulting for missing +// components. +// +// Since image references don't contain the scheme, we do our best to infer +// if we use http or https from the given hostname. We allow http fallback for +// any host that looks like localhost (localhost, 127.0.0.1, ::1), ends in +// ".local", or is in the "private" address space per RFC 1918. For everything +// else, we assume https only. To override this heuristic, use the Insecure +// option. 
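+//
+// For example, under this heuristic "localhost:5000/foo" and "10.0.0.1/foo"
+// fall back to http, while "gcr.io/foo" is always https.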
+// +// Image references with a digest signal to us that we should verify the content +// of the image matches the digest. E.g. when pulling a Digest reference, we'll +// calculate the sha256 of the manifest returned by the registry and error out +// if it doesn't match what we asked for. +// +// For defaulting, we interpret "ubuntu" as +// "index.docker.io/library/ubuntu:latest" because we add the missing repo +// "library", the missing registry "index.docker.io", and the missing tag +// "latest". To disable this defaulting, use the StrictValidation option. This +// is useful e.g. to only allow image references that explicitly set a tag or +// digest, so that you don't accidentally pull "latest". +package name diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/errors.go b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go new file mode 100644 index 00000000..bf004ffc --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/errors.go @@ -0,0 +1,48 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "errors" + "fmt" +) + +// ErrBadName is an error for when a bad docker name is supplied. +type ErrBadName struct { + info string +} + +func (e *ErrBadName) Error() string { + return e.info +} + +// Is reports whether target is an error of type ErrBadName +func (e *ErrBadName) Is(target error) bool { + var berr *ErrBadName + return errors.As(target, &berr) +} + +// newErrBadName returns a ErrBadName which returns the given formatted string from Error(). +func newErrBadName(fmtStr string, args ...any) *ErrBadName { + return &ErrBadName{fmt.Sprintf(fmtStr, args...)} +} + +// IsErrBadName returns true if the given error is an ErrBadName. +// +// Deprecated: Use errors.Is. +func IsErrBadName(err error) bool { + var berr *ErrBadName + return errors.As(err, &berr) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/options.go b/vendor/github.com/google/go-containerregistry/pkg/name/options.go new file mode 100644 index 00000000..d14fedcd --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/options.go @@ -0,0 +1,83 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +const ( + // DefaultRegistry is the registry name that will be used if no registry + // provided and the default is not overridden. 
+ DefaultRegistry = "index.docker.io" + defaultRegistryAlias = "docker.io" + + // DefaultTag is the tag name that will be used if no tag provided and the + // default is not overridden. + DefaultTag = "latest" +) + +type options struct { + strict bool // weak by default + insecure bool // secure by default + defaultRegistry string + defaultTag string +} + +func makeOptions(opts ...Option) options { + opt := options{ + defaultRegistry: DefaultRegistry, + defaultTag: DefaultTag, + } + for _, o := range opts { + o(&opt) + } + return opt +} + +// Option is a functional option for name parsing. +type Option func(*options) + +// StrictValidation is an Option that requires image references to be fully +// specified; i.e. no defaulting for registry (dockerhub), repo (library), +// or tag (latest). +func StrictValidation(opts *options) { + opts.strict = true +} + +// WeakValidation is an Option that sets defaults when parsing names, see +// StrictValidation. +func WeakValidation(opts *options) { + opts.strict = false +} + +// Insecure is an Option that allows image references to be fetched without TLS. +func Insecure(opts *options) { + opts.insecure = true +} + +// OptionFn is a function that returns an option. +type OptionFn func() Option + +// WithDefaultRegistry sets the default registry that will be used if one is not +// provided. +func WithDefaultRegistry(r string) Option { + return func(opts *options) { + opts.defaultRegistry = r + } +} + +// WithDefaultTag sets the default tag that will be used if one is not provided. +func WithDefaultTag(t string) Option { + return func(opts *options) { + opts.defaultTag = t + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/ref.go b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go new file mode 100644 index 00000000..912ab330 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/ref.go @@ -0,0 +1,75 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "fmt" +) + +// Reference defines the interface that consumers use when they can +// take either a tag or a digest. +type Reference interface { + fmt.Stringer + + // Context accesses the Repository context of the reference. + Context() Repository + + // Identifier accesses the type-specific portion of the reference. + Identifier() string + + // Name is the fully-qualified reference name. + Name() string + + // Scope is the scope needed to access this reference. + Scope(string) string +} + +// ParseReference parses the string as a reference, either by tag or digest. +func ParseReference(s string, opts ...Option) (Reference, error) { + if t, err := NewTag(s, opts...); err == nil { + return t, nil + } + if d, err := NewDigest(s, opts...); err == nil { + return d, nil + } + return nil, newErrBadName("could not parse reference: " + s) +} + +type stringConst string + +// MustParseReference behaves like ParseReference, but panics instead of +// returning an error. 
It's intended for use in tests, or when a value is +// expected to be valid at code authoring time. +// +// To discourage its use in scenarios where the value is not known at code +// authoring time, it must be passed a string constant: +// +// const str = "valid/string" +// MustParseReference(str) +// MustParseReference("another/valid/string") +// MustParseReference(str + "/and/more") +// +// These will not compile: +// +// var str = "valid/string" +// MustParseReference(str) +// MustParseReference(strings.Join([]string{"valid", "string"}, "/")) +func MustParseReference(s stringConst, opts ...Option) Reference { + ref, err := ParseReference(string(s), opts...) + if err != nil { + panic(err) + } + return ref +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go new file mode 100644 index 00000000..2a26b66d --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go @@ -0,0 +1,136 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "net" + "net/url" + "regexp" + "strings" +) + +// Detect more complex forms of local references. +var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`) + +// Detect the loopback IP (127.0.0.1) +var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1")) + +// Detect the loopback IPV6 (::1) +var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1")) + +// Registry stores a docker registry name in a structured form. +type Registry struct { + insecure bool + registry string +} + +// RegistryStr returns the registry component of the Registry. +func (r Registry) RegistryStr() string { + return r.registry +} + +// Name returns the name from which the Registry was derived. +func (r Registry) Name() string { + return r.RegistryStr() +} + +func (r Registry) String() string { + return r.Name() +} + +// Scope returns the scope required to access the registry. +func (r Registry) Scope(string) string { + // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z + return "registry:catalog:*" +} + +func (r Registry) isRFC1918() bool { + ipStr := strings.Split(r.Name(), ":")[0] + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} { + _, block, _ := net.ParseCIDR(cidr) + if block.Contains(ip) { + return true + } + } + return false +} + +// Scheme returns https scheme for all the endpoints except localhost or when explicitly defined. 
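+// For example, "localhost:5000", "127.0.0.1:5000", and RFC 1918 addresses
+// such as "10.0.0.1:5000" resolve to "http", while a public registry such as
+// "gcr.io" resolves to "https".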
+func (r Registry) Scheme() string { + if r.insecure { + return "http" + } + if r.isRFC1918() { + return "http" + } + if strings.HasPrefix(r.Name(), "localhost:") { + return "http" + } + if reLocal.MatchString(r.Name()) { + return "http" + } + if reLoopback.MatchString(r.Name()) { + return "http" + } + if reipv6Loopback.MatchString(r.Name()) { + return "http" + } + return "https" +} + +func checkRegistry(name string) error { + // Per RFC 3986, registries (authorities) are required to be prefixed with "//" + // url.Host == hostname[:port] == authority + if url, err := url.Parse("//" + name); err != nil || url.Host != name { + return newErrBadName("registries must be valid RFC 3986 URI authorities: %s", name) + } + return nil +} + +// NewRegistry returns a Registry based on the given name. +// Strict validation requires explicit, valid RFC 3986 URI authorities to be given. +func NewRegistry(name string, opts ...Option) (Registry, error) { + opt := makeOptions(opts...) + if opt.strict && len(name) == 0 { + return Registry{}, newErrBadName("strict validation requires the registry to be explicitly defined") + } + + if err := checkRegistry(name); err != nil { + return Registry{}, err + } + + if name == "" { + name = opt.defaultRegistry + } + // Rewrite "docker.io" to "index.docker.io". + // See: https://github.com/google/go-containerregistry/issues/68 + if name == defaultRegistryAlias { + name = DefaultRegistry + } + + return Registry{registry: name, insecure: opt.insecure}, nil +} + +// NewInsecureRegistry returns an Insecure Registry based on the given name. +// +// Deprecated: Use the Insecure Option with NewRegistry instead. +func NewInsecureRegistry(name string, opts ...Option) (Registry, error) { + opts = append(opts, Insecure) + return NewRegistry(name, opts...) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/repository.go b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go new file mode 100644 index 00000000..9250e362 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/repository.go @@ -0,0 +1,121 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "fmt" + "strings" +) + +const ( + defaultNamespace = "library" + repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./" + regRepoDelimiter = "/" +) + +// Repository stores a docker repository name in a structured form. +type Repository struct { + Registry + repository string +} + +// See https://docs.docker.com/docker-hub/official_repos +func hasImplicitNamespace(repo string, reg Registry) bool { + return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry +} + +// RepositoryStr returns the repository component of the Repository. 
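+// For Docker Hub references this includes the implicit "library" namespace,
+// e.g. the Repository parsed from "ubuntu" has RepositoryStr
+// "library/ubuntu".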
+func (r Repository) RepositoryStr() string { + if hasImplicitNamespace(r.repository, r.Registry) { + return fmt.Sprintf("%s/%s", defaultNamespace, r.repository) + } + return r.repository +} + +// Name returns the name from which the Repository was derived. +func (r Repository) Name() string { + regName := r.Registry.Name() + if regName != "" { + return regName + regRepoDelimiter + r.RepositoryStr() + } + // TODO: As far as I can tell, this is unreachable. + return r.RepositoryStr() +} + +func (r Repository) String() string { + return r.Name() +} + +// Scope returns the scope required to perform the given action on the registry. +// TODO(jonjohnsonjr): consider moving scopes to a separate package. +func (r Repository) Scope(action string) string { + return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action) +} + +func checkRepository(repository string) error { + return checkElement("repository", repository, repositoryChars, 2, 255) +} + +// NewRepository returns a new Repository representing the given name, according to the given strictness. +func NewRepository(name string, opts ...Option) (Repository, error) { + opt := makeOptions(opts...) + if len(name) == 0 { + return Repository{}, newErrBadName("a repository name must be specified") + } + + var registry string + repo := name + parts := strings.SplitN(name, regRepoDelimiter, 2) + if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) { + // The first part of the repository is treated as the registry domain + // iff it contains a '.' or ':' character, otherwise it is all repository + // and the domain defaults to Docker Hub. + registry = parts[0] + repo = parts[1] + } + + if err := checkRepository(repo); err != nil { + return Repository{}, err + } + + reg, err := NewRegistry(registry, opts...) + if err != nil { + return Repository{}, err + } + if hasImplicitNamespace(repo, reg) && opt.strict { + return Repository{}, newErrBadName("strict validation requires the full repository path (missing 'library')") + } + return Repository{reg, repo}, nil +} + +// Tag returns a Tag in this Repository. +func (r Repository) Tag(identifier string) Tag { + t := Tag{ + tag: identifier, + Repository: r, + } + t.original = t.Name() + return t +} + +// Digest returns a Digest in this Repository. +func (r Repository) Digest(identifier string) Digest { + d := Digest{ + digest: identifier, + Repository: r, + } + d.original = d.Name() + return d +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/tag.go b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go new file mode 100644 index 00000000..66bd1bec --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/name/tag.go @@ -0,0 +1,108 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "strings" +) + +const ( + // TODO(dekkagaijin): use the docker/distribution regexes for validation. 
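+	// tagChars is the set of characters currently accepted in a tag;
+	// checkTag below enforces it, along with a maximum length of 128.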
+ tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ" + tagDelim = ":" +) + +// Tag stores a docker tag name in a structured form. +type Tag struct { + Repository + tag string + original string +} + +// Ensure Tag implements Reference +var _ Reference = (*Tag)(nil) + +// Context implements Reference. +func (t Tag) Context() Repository { + return t.Repository +} + +// Identifier implements Reference. +func (t Tag) Identifier() string { + return t.TagStr() +} + +// TagStr returns the tag component of the Tag. +func (t Tag) TagStr() string { + return t.tag +} + +// Name returns the name from which the Tag was derived. +func (t Tag) Name() string { + return t.Repository.Name() + tagDelim + t.TagStr() +} + +// String returns the original input string. +func (t Tag) String() string { + return t.original +} + +// Scope returns the scope required to perform the given action on the tag. +func (t Tag) Scope(action string) string { + return t.Repository.Scope(action) +} + +func checkTag(name string) error { + return checkElement("tag", name, tagChars, 1, 128) +} + +// NewTag returns a new Tag representing the given name, according to the given strictness. +func NewTag(name string, opts ...Option) (Tag, error) { + opt := makeOptions(opts...) + base := name + tag := "" + + // Split on ":" + parts := strings.Split(name, tagDelim) + // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation. + if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) { + base = strings.Join(parts[:len(parts)-1], tagDelim) + tag = parts[len(parts)-1] + } + + // We don't require a tag, but if we get one check it's valid, + // even when not being strict. + // If we are being strict, we want to validate the tag regardless in case + // it's empty. + if tag != "" || opt.strict { + if err := checkTag(tag); err != nil { + return Tag{}, err + } + } + + if tag == "" { + tag = opt.defaultTag + } + + repo, err := NewRepository(base, opts...) + if err != nil { + return Tag{}, err + } + return Tag{ + Repository: repo, + tag: tag, + original: name, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go new file mode 100644 index 00000000..08ec0158 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -0,0 +1,136 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "io" + "time" +) + +// ConfigFile is the configuration file that holds the metadata describing +// how to launch a container. See: +// https://github.com/opencontainers/image-spec/blob/master/config.md +// +// docker_version and os.version are not part of the spec but included +// for backwards compatibility. 
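+//
+// As a rough illustration (not a normative example), a minimal config file
+// might look like:
+//
+//	{
+//	  "architecture": "amd64",
+//	  "os": "linux",
+//	  "rootfs": {"type": "layers", "diff_ids": []},
+//	  "config": {}
+//	}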
+type ConfigFile struct { + Architecture string `json:"architecture"` + Author string `json:"author,omitempty"` + Container string `json:"container,omitempty"` + Created Time `json:"created,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + History []History `json:"history,omitempty"` + OS string `json:"os"` + RootFS RootFS `json:"rootfs"` + Config Config `json:"config"` + OSVersion string `json:"os.version,omitempty"` + Variant string `json:"variant,omitempty"` +} + +// History is one entry of a list recording how this container image was built. +type History struct { + Author string `json:"author,omitempty"` + Created Time `json:"created,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Time is a wrapper around time.Time to help with deep copying +type Time struct { + time.Time +} + +// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t *Time) DeepCopyInto(out *Time) { + *out = *t +} + +// RootFS holds the ordered list of file system deltas that comprise the +// container image's root filesystem. +type RootFS struct { + Type string `json:"type"` + DiffIDs []Hash `json:"diff_ids"` +} + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config is a submessage of the config file described as: +// +// The execution parameters which SHOULD be used as a base when running +// a container using the image. 
+//
+// The names of the fields in this message are chosen to reflect the JSON
+// payload of the Config as defined here:
+// https://git.io/vrAET
+// and
+// https://github.com/opencontainers/image-spec/blob/master/config.md
+type Config struct {
+	AttachStderr    bool                `json:"AttachStderr,omitempty"`
+	AttachStdin     bool                `json:"AttachStdin,omitempty"`
+	AttachStdout    bool                `json:"AttachStdout,omitempty"`
+	Cmd             []string            `json:"Cmd,omitempty"`
+	Healthcheck     *HealthConfig       `json:"Healthcheck,omitempty"`
+	Domainname      string              `json:"Domainname,omitempty"`
+	Entrypoint      []string            `json:"Entrypoint,omitempty"`
+	Env             []string            `json:"Env,omitempty"`
+	Hostname        string              `json:"Hostname,omitempty"`
+	Image           string              `json:"Image,omitempty"`
+	Labels          map[string]string   `json:"Labels,omitempty"`
+	OnBuild         []string            `json:"OnBuild,omitempty"`
+	OpenStdin       bool                `json:"OpenStdin,omitempty"`
+	StdinOnce       bool                `json:"StdinOnce,omitempty"`
+	Tty             bool                `json:"Tty,omitempty"`
+	User            string              `json:"User,omitempty"`
+	Volumes         map[string]struct{} `json:"Volumes,omitempty"`
+	WorkingDir      string              `json:"WorkingDir,omitempty"`
+	ExposedPorts    map[string]struct{} `json:"ExposedPorts,omitempty"`
+	ArgsEscaped     bool                `json:"ArgsEscaped,omitempty"`
+	NetworkDisabled bool                `json:"NetworkDisabled,omitempty"`
+	MacAddress      string              `json:"MacAddress,omitempty"`
+	StopSignal      string              `json:"StopSignal,omitempty"`
+	Shell           []string            `json:"Shell,omitempty"`
+}
+
+// ParseConfigFile parses the io.Reader's contents into a ConfigFile.
+func ParseConfigFile(r io.Reader) (*ConfigFile, error) {
+	cf := ConfigFile{}
+	if err := json.NewDecoder(r).Decode(&cf); err != nil {
+		return nil, err
+	}
+	return &cf, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go
new file mode 100644
index 00000000..7a84736b
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +k8s:deepcopy-gen=package
+
+// Package v1 defines structured types for OCI v1 images.
+package v1
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md
new file mode 100644
index 00000000..8663a830
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md
@@ -0,0 +1,8 @@
+# `empty`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/empty)
+
+The `empty` package provides an empty base for constructing a `v1.Image` or `v1.ImageIndex`.
+This is especially useful when paired with the [`mutate`](/pkg/v1/mutate) package,
+see [`mutate.Append`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#Append)
+and [`mutate.AppendManifests`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate#AppendManifests).
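+
+For example, a minimal sketch of building an image from scratch (here `layer`
+is assumed to be a `v1.Layer` obtained elsewhere, e.g. from the
+[`tarball`](/pkg/v1/tarball) package):
+
+```go
+// Start from the empty base and append a filesystem layer to it.
+img, err := mutate.AppendLayers(empty.Image, layer)
+if err != nil {
+	panic(err)
+}
+```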
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go new file mode 100644 index 00000000..1a521e9a --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package empty provides an implementation of v1.Image equivalent to "FROM scratch". +package empty diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go new file mode 100644 index 00000000..c58a06ce --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go @@ -0,0 +1,52 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package empty + +import ( + "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Image is a singleton empty image, think: FROM scratch. +var Image, _ = partial.UncompressedToImage(emptyImage{}) + +type emptyImage struct{} + +// MediaType implements partial.UncompressedImageCore. +func (i emptyImage) MediaType() (types.MediaType, error) { + return types.DockerManifestSchema2, nil +} + +// RawConfigFile implements partial.UncompressedImageCore. +func (i emptyImage) RawConfigFile() ([]byte, error) { + return partial.RawConfigFile(i) +} + +// ConfigFile implements v1.Image. +func (i emptyImage) ConfigFile() (*v1.ConfigFile, error) { + return &v1.ConfigFile{ + RootFS: v1.RootFS{ + // Some clients check this. + Type: "layers", + }, + }, nil +} + +func (i emptyImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { + return nil, fmt.Errorf("LayerByDiffID(%s): empty image", h) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go new file mode 100644 index 00000000..8edab24d --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go @@ -0,0 +1,63 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package empty
+
+import (
+	"encoding/json"
+	"errors"
+
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// Index is a singleton empty index, think: FROM scratch.
+var Index = emptyIndex{}
+
+type emptyIndex struct{}
+
+func (i emptyIndex) MediaType() (types.MediaType, error) {
+	return types.OCIImageIndex, nil
+}
+
+func (i emptyIndex) Digest() (v1.Hash, error) {
+	return partial.Digest(i)
+}
+
+func (i emptyIndex) Size() (int64, error) {
+	return partial.Size(i)
+}
+
+func (i emptyIndex) IndexManifest() (*v1.IndexManifest, error) {
+	return base(), nil
+}
+
+func (i emptyIndex) RawManifest() ([]byte, error) {
+	return json.Marshal(base())
+}
+
+func (i emptyIndex) Image(v1.Hash) (v1.Image, error) {
+	return nil, errors.New("empty index")
+}
+
+func (i emptyIndex) ImageIndex(v1.Hash) (v1.ImageIndex, error) {
+	return nil, errors.New("empty index")
+}
+
+func base() *v1.IndexManifest {
+	return &v1.IndexManifest{
+		SchemaVersion: 2,
+	}
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go
new file mode 100644
index 00000000..e9630087
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go
@@ -0,0 +1,123 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// Hash is an unqualified digest of some content, e.g. sha256:deadbeef
+type Hash struct {
+	// Algorithm holds the algorithm used to compute the hash.
+	Algorithm string
+
+	// Hex holds the hex portion of the content hash.
+	Hex string
+}
+
+// String reverses NewHash, returning the string form of the hash.
+func (h Hash) String() string {
+	return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex)
+}
+
+// NewHash validates that the input string is a hash and returns a strongly typed Hash object.
+func NewHash(s string) (Hash, error) {
+	h := Hash{}
+	if err := h.parse(s); err != nil {
+		return Hash{}, err
+	}
+	return h, nil
+}
+
+// MarshalJSON implements json.Marshaler
+func (h Hash) MarshalJSON() ([]byte, error) {
+	return json.Marshal(h.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler
+func (h *Hash) UnmarshalJSON(data []byte) error {
+	s, err := strconv.Unquote(string(data))
+	if err != nil {
+		return err
+	}
+	return h.parse(s)
+}
+
+// MarshalText implements encoding.TextMarshaler.
This is required to use +// v1.Hash as a key in a map when marshalling JSON. +func (h Hash) MarshalText() (text []byte, err error) { + return []byte(h.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. This is required to use +// v1.Hash as a key in a map when unmarshalling JSON. +func (h *Hash) UnmarshalText(text []byte) error { + return h.parse(string(text)) +} + +// Hasher returns a hash.Hash for the named algorithm (e.g. "sha256") +func Hasher(name string) (hash.Hash, error) { + switch name { + case "sha256": + return sha256.New(), nil + default: + return nil, fmt.Errorf("unsupported hash: %q", name) + } +} + +func (h *Hash) parse(unquoted string) error { + parts := strings.Split(unquoted, ":") + if len(parts) != 2 { + return fmt.Errorf("cannot parse hash: %q", unquoted) + } + + rest := strings.TrimLeft(parts[1], "0123456789abcdef") + if len(rest) != 0 { + return fmt.Errorf("found non-hex character in hash: %c", rest[0]) + } + + hasher, err := Hasher(parts[0]) + if err != nil { + return err + } + // Compare the hex to the expected size (2 hex characters per byte) + if len(parts[1]) != hasher.Size()*2 { + return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1]) + } + + h.Algorithm = parts[0] + h.Hex = parts[1] + return nil +} + +// SHA256 computes the Hash of the provided io.Reader's content. +func SHA256(r io.Reader) (Hash, int64, error) { + hasher := sha256.New() + n, err := io.Copy(hasher, r) + if err != nil { + return Hash{}, 0, err + } + return Hash{ + Algorithm: "sha256", + Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), + }, n, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go new file mode 100644 index 00000000..8de9e476 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go @@ -0,0 +1,59 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Image defines the interface for interacting with an OCI v1 image. +type Image interface { + // Layers returns the ordered collection of filesystem layers that comprise this image. + // The order of the list is oldest/base layer first, and most-recent/top layer last. + Layers() ([]Layer, error) + + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // Size returns the size of the manifest. + Size() (int64, error) + + // ConfigName returns the hash of the image's config file, also known as + // the Image ID. + ConfigName() (Hash, error) + + // ConfigFile returns this image's config file. + ConfigFile() (*ConfigFile, error) + + // RawConfigFile returns the serialized bytes of ConfigFile(). + RawConfigFile() ([]byte, error) + + // Digest returns the sha256 of this image's manifest. + Digest() (Hash, error) + + // Manifest returns this image's Manifest object. 
+ Manifest() (*Manifest, error) + + // RawManifest returns the serialized bytes of Manifest() + RawManifest() ([]byte, error) + + // LayerByDigest returns a Layer for interacting with a particular layer of + // the image, looking it up by "digest" (the compressed hash). + LayerByDigest(Hash) (Layer, error) + + // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" + // (the uncompressed hash). + LayerByDiffID(Hash) (Layer, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go new file mode 100644 index 00000000..8e7bc8eb --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// ImageIndex defines the interface for interacting with an OCI image index. +type ImageIndex interface { + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // Digest returns the sha256 of this index's manifest. + Digest() (Hash, error) + + // Size returns the size of the manifest. + Size() (int64, error) + + // IndexManifest returns this image index's manifest object. + IndexManifest() (*IndexManifest, error) + + // RawManifest returns the serialized bytes of IndexManifest(). + RawManifest() ([]byte, error) + + // Image returns a v1.Image that this ImageIndex references. + Image(Hash) (Image, error) + + // ImageIndex returns a v1.ImageIndex that this ImageIndex references. + ImageIndex(Hash) (ImageIndex, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go new file mode 100644 index 00000000..57447d26 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go @@ -0,0 +1,42 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "io" + + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Layer is an interface for accessing the properties of a particular layer of a v1.Image +type Layer interface { + // Digest returns the Hash of the compressed layer. + Digest() (Hash, error) + + // DiffID returns the Hash of the uncompressed layer. + DiffID() (Hash, error) + + // Compressed returns an io.ReadCloser for the compressed layer contents. 
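+	// The caller is responsible for closing the returned reader.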
+ Compressed() (io.ReadCloser, error) + + // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. + Uncompressed() (io.ReadCloser, error) + + // Size returns the compressed size of the Layer. + Size() (int64, error) + + // MediaType returns the media type of the Layer. + MediaType() (types.MediaType, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/README.md new file mode 100644 index 00000000..54bee6d9 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/README.md @@ -0,0 +1,5 @@ +# `layout` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/layout?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/layout) + +The `layout` package implements support for interacting with an [OCI Image Layout](https://github.com/opencontainers/image-spec/blob/master/image-layout.md). diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/blob.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/blob.go new file mode 100644 index 00000000..2e5f4358 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/blob.go @@ -0,0 +1,37 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package layout + +import ( + "io" + "os" + + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +// Blob returns a blob with the given hash from the Path. +func (l Path) Blob(h v1.Hash) (io.ReadCloser, error) { + return os.Open(l.blobPath(h)) +} + +// Bytes is a convenience function to return a blob from the Path as +// a byte slice. +func (l Path) Bytes(h v1.Hash) ([]byte, error) { + return os.ReadFile(l.blobPath(h)) +} + +func (l Path) blobPath(h v1.Hash) string { + return l.path("blobs", h.Algorithm, h.Hex) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/doc.go new file mode 100644 index 00000000..d80d2736 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package layout provides facilities for reading/writing artifacts from/to +// an OCI image layout on disk, see: +// +// https://github.com/opencontainers/image-spec/blob/master/image-layout.md +package layout diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/image.go new file mode 100644 index 00000000..c9ae9665 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/image.go @@ -0,0 +1,139 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package layout + +import ( + "fmt" + "io" + "os" + "sync" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type layoutImage struct { + path Path + desc v1.Descriptor + manifestLock sync.Mutex // Protects rawManifest + rawManifest []byte +} + +var _ partial.CompressedImageCore = (*layoutImage)(nil) + +// Image reads a v1.Image with digest h from the Path. +func (l Path) Image(h v1.Hash) (v1.Image, error) { + ii, err := l.ImageIndex() + if err != nil { + return nil, err + } + + return ii.Image(h) +} + +func (li *layoutImage) MediaType() (types.MediaType, error) { + return li.desc.MediaType, nil +} + +// Implements WithManifest for partial.Blobset. +func (li *layoutImage) Manifest() (*v1.Manifest, error) { + return partial.Manifest(li) +} + +func (li *layoutImage) RawManifest() ([]byte, error) { + li.manifestLock.Lock() + defer li.manifestLock.Unlock() + if li.rawManifest != nil { + return li.rawManifest, nil + } + + b, err := li.path.Bytes(li.desc.Digest) + if err != nil { + return nil, err + } + + li.rawManifest = b + return li.rawManifest, nil +} + +func (li *layoutImage) RawConfigFile() ([]byte, error) { + manifest, err := li.Manifest() + if err != nil { + return nil, err + } + + return li.path.Bytes(manifest.Config.Digest) +} + +func (li *layoutImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + manifest, err := li.Manifest() + if err != nil { + return nil, err + } + + if h == manifest.Config.Digest { + return &compressedBlob{ + path: li.path, + desc: manifest.Config, + }, nil + } + + for _, desc := range manifest.Layers { + if h == desc.Digest { + return &compressedBlob{ + path: li.path, + desc: desc, + }, nil + } + } + + return nil, fmt.Errorf("could not find layer in image: %s", h) +} + +type compressedBlob struct { + path Path + desc v1.Descriptor +} + +func (b *compressedBlob) Digest() (v1.Hash, error) { + return b.desc.Digest, nil +} + +func (b *compressedBlob) Compressed() (io.ReadCloser, error) { + return b.path.Blob(b.desc.Digest) +} + +func (b *compressedBlob) Size() (int64, error) { + return b.desc.Size, nil +} + +func (b *compressedBlob) MediaType() (types.MediaType, error) { + return b.desc.MediaType, nil +} + +// Descriptor implements partial.withDescriptor. 
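+// Exposing the descriptor lets callers (e.g. the partial package) reuse the
+// digest, size, and media type recorded in the layout rather than recomputing
+// them from the blob.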
+func (b *compressedBlob) Descriptor() (*v1.Descriptor, error) { + return &b.desc, nil +} + +// See partial.Exists. +func (b *compressedBlob) Exists() (bool, error) { + _, err := os.Stat(b.path.blobPath(b.desc.Digest)) + if os.IsNotExist(err) { + return false, nil + } + return err == nil, err +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/index.go new file mode 100644 index 00000000..7404f186 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/index.go @@ -0,0 +1,161 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package layout + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "os" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var _ v1.ImageIndex = (*layoutIndex)(nil) + +type layoutIndex struct { + mediaType types.MediaType + path Path + rawIndex []byte +} + +// ImageIndexFromPath is a convenience function which constructs a Path and returns its v1.ImageIndex. +func ImageIndexFromPath(path string) (v1.ImageIndex, error) { + lp, err := FromPath(path) + if err != nil { + return nil, err + } + return lp.ImageIndex() +} + +// ImageIndex returns a v1.ImageIndex for the Path. +func (l Path) ImageIndex() (v1.ImageIndex, error) { + rawIndex, err := os.ReadFile(l.path("index.json")) + if err != nil { + return nil, err + } + + idx := &layoutIndex{ + mediaType: types.OCIImageIndex, + path: l, + rawIndex: rawIndex, + } + + return idx, nil +} + +func (i *layoutIndex) MediaType() (types.MediaType, error) { + return i.mediaType, nil +} + +func (i *layoutIndex) Digest() (v1.Hash, error) { + return partial.Digest(i) +} + +func (i *layoutIndex) Size() (int64, error) { + return partial.Size(i) +} + +func (i *layoutIndex) IndexManifest() (*v1.IndexManifest, error) { + var index v1.IndexManifest + err := json.Unmarshal(i.rawIndex, &index) + return &index, err +} + +func (i *layoutIndex) RawManifest() ([]byte, error) { + return i.rawIndex, nil +} + +func (i *layoutIndex) Image(h v1.Hash) (v1.Image, error) { + // Look up the digest in our manifest first to return a better error. + desc, err := i.findDescriptor(h) + if err != nil { + return nil, err + } + + if !isExpectedMediaType(desc.MediaType, types.OCIManifestSchema1, types.DockerManifestSchema2) { + return nil, fmt.Errorf("unexpected media type for %v: %s", h, desc.MediaType) + } + + img := &layoutImage{ + path: i.path, + desc: *desc, + } + return partial.CompressedToImage(img) +} + +func (i *layoutIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + // Look up the digest in our manifest first to return a better error. 
+ desc, err := i.findDescriptor(h) + if err != nil { + return nil, err + } + + if !isExpectedMediaType(desc.MediaType, types.OCIImageIndex, types.DockerManifestList) { + return nil, fmt.Errorf("unexpected media type for %v: %s", h, desc.MediaType) + } + + rawIndex, err := i.path.Bytes(h) + if err != nil { + return nil, err + } + + return &layoutIndex{ + mediaType: desc.MediaType, + path: i.path, + rawIndex: rawIndex, + }, nil +} + +func (i *layoutIndex) Blob(h v1.Hash) (io.ReadCloser, error) { + return i.path.Blob(h) +} + +func (i *layoutIndex) findDescriptor(h v1.Hash) (*v1.Descriptor, error) { + im, err := i.IndexManifest() + if err != nil { + return nil, err + } + + if h == (v1.Hash{}) { + if len(im.Manifests) != 1 { + return nil, errors.New("oci layout must contain only a single image to be used with layout.Image") + } + return &(im.Manifests)[0], nil + } + + for _, desc := range im.Manifests { + if desc.Digest == h { + return &desc, nil + } + } + + return nil, fmt.Errorf("could not find descriptor in index: %s", h) +} + +// TODO: Pull this out into methods on types.MediaType? e.g. instead, have: +// * mt.IsIndex() +// * mt.IsImage() +func isExpectedMediaType(mt types.MediaType, expected ...types.MediaType) bool { + for _, allowed := range expected { + if mt == allowed { + return true + } + } + return false +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go new file mode 100644 index 00000000..a031ff5a --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go @@ -0,0 +1,25 @@ +// Copyright 2019 The original author or authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package layout + +import "path/filepath" + +// Path represents an OCI image layout rooted in a file system path +type Path string + +func (l Path) path(elem ...string) string { + complete := []string{string(l)} + return filepath.Join(append(complete, elem...)...) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/options.go new file mode 100644 index 00000000..a26f9f37 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/options.go @@ -0,0 +1,71 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package layout + +import v1 "github.com/google/go-containerregistry/pkg/v1" + +// Option is a functional option for Layout. +type Option func(*options) + +type options struct { + descOpts []descriptorOption +} + +func makeOptions(opts ...Option) *options { + o := &options{ + descOpts: []descriptorOption{}, + } + for _, apply := range opts { + apply(o) + } + return o +} + +type descriptorOption func(*v1.Descriptor) + +// WithAnnotations adds annotations to the artifact descriptor. +func WithAnnotations(annotations map[string]string) Option { + return func(o *options) { + o.descOpts = append(o.descOpts, func(desc *v1.Descriptor) { + if desc.Annotations == nil { + desc.Annotations = make(map[string]string) + } + for k, v := range annotations { + desc.Annotations[k] = v + } + }) + } +} + +// WithURLs adds urls to the artifact descriptor. +func WithURLs(urls []string) Option { + return func(o *options) { + o.descOpts = append(o.descOpts, func(desc *v1.Descriptor) { + if desc.URLs == nil { + desc.URLs = []string{} + } + desc.URLs = append(desc.URLs, urls...) + }) + } +} + +// WithPlatform sets the platform of the artifact descriptor. +func WithPlatform(platform v1.Platform) Option { + return func(o *options) { + o.descOpts = append(o.descOpts, func(desc *v1.Descriptor) { + desc.Platform = &platform + }) + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/read.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/read.go new file mode 100644 index 00000000..796abc7d --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/read.go @@ -0,0 +1,32 @@ +// Copyright 2019 The original author or authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package layout + +import ( + "os" + "path/filepath" +) + +// FromPath reads an OCI image layout at path and constructs a layout.Path. +func FromPath(path string) (Path, error) { + // TODO: check oci-layout exists + + _, err := os.Stat(filepath.Join(path, "index.json")) + if err != nil { + return "", err + } + + return Path(path), nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go new file mode 100644 index 00000000..906b12ae --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go @@ -0,0 +1,481 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package layout + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/google/go-containerregistry/pkg/logs" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/match" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" + "golang.org/x/sync/errgroup" +) + +var layoutFile = `{ + "imageLayoutVersion": "1.0.0" +}` + +// AppendImage writes a v1.Image to the Path and updates +// the index.json to reference it. +func (l Path) AppendImage(img v1.Image, options ...Option) error { + if err := l.WriteImage(img); err != nil { + return err + } + + desc, err := partial.Descriptor(img) + if err != nil { + return err + } + + o := makeOptions(options...) + for _, opt := range o.descOpts { + opt(desc) + } + + return l.AppendDescriptor(*desc) +} + +// AppendIndex writes a v1.ImageIndex to the Path and updates +// the index.json to reference it. +func (l Path) AppendIndex(ii v1.ImageIndex, options ...Option) error { + if err := l.WriteIndex(ii); err != nil { + return err + } + + desc, err := partial.Descriptor(ii) + if err != nil { + return err + } + + o := makeOptions(options...) + for _, opt := range o.descOpts { + opt(desc) + } + + return l.AppendDescriptor(*desc) +} + +// AppendDescriptor adds a descriptor to the index.json of the Path. +func (l Path) AppendDescriptor(desc v1.Descriptor) error { + ii, err := l.ImageIndex() + if err != nil { + return err + } + + index, err := ii.IndexManifest() + if err != nil { + return err + } + + index.Manifests = append(index.Manifests, desc) + + rawIndex, err := json.MarshalIndent(index, "", " ") + if err != nil { + return err + } + + return l.WriteFile("index.json", rawIndex, os.ModePerm) +} + +// ReplaceImage writes a v1.Image to the Path and updates +// the index.json to reference it, replacing any existing one that matches matcher, if found. +func (l Path) ReplaceImage(img v1.Image, matcher match.Matcher, options ...Option) error { + if err := l.WriteImage(img); err != nil { + return err + } + + return l.replaceDescriptor(img, matcher, options...) +} + +// ReplaceIndex writes a v1.ImageIndex to the Path and updates +// the index.json to reference it, replacing any existing one that matches matcher, if found. +func (l Path) ReplaceIndex(ii v1.ImageIndex, matcher match.Matcher, options ...Option) error { + if err := l.WriteIndex(ii); err != nil { + return err + } + + return l.replaceDescriptor(ii, matcher, options...) +} + +// replaceDescriptor adds a descriptor to the index.json of the Path, replacing +// any one matching matcher, if found. +func (l Path) replaceDescriptor(append mutate.Appendable, matcher match.Matcher, options ...Option) error { + ii, err := l.ImageIndex() + if err != nil { + return err + } + + desc, err := partial.Descriptor(append) + if err != nil { + return err + } + + o := makeOptions(options...) 
+	for _, opt := range o.descOpts {
+		opt(desc)
+	}
+
+	add := mutate.IndexAddendum{
+		Add:        append,
+		Descriptor: *desc,
+	}
+	ii = mutate.AppendManifests(mutate.RemoveManifests(ii, matcher), add)
+
+	index, err := ii.IndexManifest()
+	if err != nil {
+		return err
+	}
+
+	rawIndex, err := json.MarshalIndent(index, "", " ")
+	if err != nil {
+		return err
+	}
+
+	return l.WriteFile("index.json", rawIndex, os.ModePerm)
+}
+
+// RemoveDescriptors removes any descriptors that match the match.Matcher from the index.json of the Path.
+func (l Path) RemoveDescriptors(matcher match.Matcher) error {
+	ii, err := l.ImageIndex()
+	if err != nil {
+		return err
+	}
+	ii = mutate.RemoveManifests(ii, matcher)
+
+	index, err := ii.IndexManifest()
+	if err != nil {
+		return err
+	}
+
+	rawIndex, err := json.MarshalIndent(index, "", " ")
+	if err != nil {
+		return err
+	}
+
+	return l.WriteFile("index.json", rawIndex, os.ModePerm)
+}
+
+// WriteFile writes a file with arbitrary data at an arbitrary location in a v1
+// layout. It is used mostly internally to write files like "oci-layout" and
+// "index.json", but it can also be used to write other arbitrary files. Do
+// *not* use this to write blobs; use only WriteBlob() for that.
+func (l Path) WriteFile(name string, data []byte, perm os.FileMode) error {
+	if err := os.MkdirAll(l.path(), os.ModePerm); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	return os.WriteFile(l.path(name), data, perm)
+}
+
+// WriteBlob copies a file to the blobs/ directory in the Path from the given ReadCloser at
+// blobs/{hash.Algorithm}/{hash.Hex}.
+func (l Path) WriteBlob(hash v1.Hash, r io.ReadCloser) error {
+	return l.writeBlob(hash, -1, r, nil)
+}
+
+func (l Path) writeBlob(hash v1.Hash, size int64, rc io.ReadCloser, renamer func() (v1.Hash, error)) error {
+	if hash.Hex == "" && renamer == nil {
+		panic("writeBlob called with an invalid hash and no renamer")
+	}
+
+	dir := l.path("blobs", hash.Algorithm)
+	if err := os.MkdirAll(dir, os.ModePerm); err != nil && !os.IsExist(err) {
+		return err
+	}
+
+	// Check if the blob already exists and is the correct size.
+	file := filepath.Join(dir, hash.Hex)
+	if s, err := os.Stat(file); err == nil && !s.IsDir() && (s.Size() == size || size == -1) {
+		return nil
+	}
+
+	// If a renamer func was provided, write to a temporary file.
+	open := func() (*os.File, error) { return os.Create(file) }
+	if renamer != nil {
+		open = func() (*os.File, error) { return os.CreateTemp(dir, hash.Hex) }
+	}
+	w, err := open()
+	if err != nil {
+		return err
+	}
+	if renamer != nil {
+		// Delete the temp file if an error is encountered before renaming.
+		defer func() {
+			if err := os.Remove(w.Name()); err != nil && !errors.Is(err, os.ErrNotExist) {
+				logs.Warn.Printf("error removing temporary file after encountering an error while writing blob: %v", err)
+			}
+		}()
+	}
+	defer w.Close()
+
+	// Write to the file and exit if not renaming.
+	if n, err := io.Copy(w, rc); err != nil || renamer == nil {
+		return err
+	} else if size != -1 && n != size {
+		return fmt.Errorf("expected blob size %d, but only wrote %d", size, n)
+	}
+
+	// Always close the reader before renaming, since Close computes the digest
+	// in the case of streaming layers. If Close is not called explicitly, it
+	// will occur in a goroutine that is not guaranteed to succeed before
+	// renamer is called. When renamer is the layer's Digest method, it can
+	// return ErrNotComputed.
+	if err := rc.Close(); err != nil {
+		return err
+	}
+
+	// Always close the file before renaming.
+	if err := w.Close(); err != nil {
+		return err
+	}
+
+	// Rename the file based on the final hash.
+	finalHash, err := renamer()
+	if err != nil {
+		return fmt.Errorf("error getting final digest of layer: %w", err)
+	}
+
+	renamePath := l.path("blobs", finalHash.Algorithm, finalHash.Hex)
+	return os.Rename(w.Name(), renamePath)
+}
+
+// writeLayer writes the compressed layer to a blob. Unlike WriteBlob, it will
+// write to a temporary file within the layout until the compressed reader is
+// fully consumed and written to disk, and only then rename it into place. Also
+// unlike WriteBlob, it will not silently skip writing when a blob file already
+// exists but does not have the correct size. (The blob hash is not considered,
+// because it may be expensive to compute.)
+func (l Path) writeLayer(layer v1.Layer) error {
+	d, err := layer.Digest()
+	if errors.Is(err, stream.ErrNotComputed) {
+		// Allow digest errors, since streams may not have calculated the hash
+		// yet. Instead, use an empty value, which will be transformed into a
+		// random file name with `os.CreateTemp` and the final digest will be
+		// calculated after writing to a temp file and before renaming to the
+		// final path.
+		d = v1.Hash{Algorithm: "sha256", Hex: ""}
+	} else if err != nil {
+		return err
+	}
+
+	s, err := layer.Size()
+	if errors.Is(err, stream.ErrNotComputed) {
+		// Allow size errors, since streams may not have calculated the size
+		// yet. Instead, use -1 as a sentinel value meaning that no size
+		// comparison can be done and any sized blob file should be considered
+		// valid and not overwritten.
+		//
+		// TODO: Provide an option to always overwrite blobs.
+		s = -1
+	} else if err != nil {
+		return err
+	}
+
+	r, err := layer.Compressed()
+	if err != nil {
+		return err
+	}
+
+	if err := l.writeBlob(d, s, r, layer.Digest); err != nil {
+		return fmt.Errorf("error writing layer: %w", err)
+	}
+	return nil
+}
+
+// RemoveBlob removes a file from the blobs directory in the Path
+// at blobs/{hash.Algorithm}/{hash.Hex}.
+// It does *not* remove any reference to it from other manifests or indexes, or
+// from the root index.json.
+func (l Path) RemoveBlob(hash v1.Hash) error {
+	dir := l.path("blobs", hash.Algorithm)
+	err := os.Remove(filepath.Join(dir, hash.Hex))
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// WriteImage writes an image, including its manifest, config and all of its
+// layers, to the blobs directory. If any blob already exists, as determined by
+// the hash filename, it is not written again.
+// This function does *not* update the `index.json` file. If you want to write the
+// image and also update the `index.json`, call AppendImage(), which wraps this
+// and also updates the `index.json`.
+func (l Path) WriteImage(img v1.Image) error {
+	layers, err := img.Layers()
+	if err != nil {
+		return err
+	}
+
+	// Write the layers concurrently.
+	var g errgroup.Group
+	for _, layer := range layers {
+		layer := layer
+		g.Go(func() error {
+			return l.writeLayer(layer)
+		})
+	}
+	if err := g.Wait(); err != nil {
+		return err
+	}
+
+	// Write the config.
+	cfgName, err := img.ConfigName()
+	if err != nil {
+		return err
+	}
+	cfgBlob, err := img.RawConfigFile()
+	if err != nil {
+		return err
+	}
+	if err := l.WriteBlob(cfgName, io.NopCloser(bytes.NewReader(cfgBlob))); err != nil {
+		return err
+	}
+
+	// Write the img manifest.
+	d, err := img.Digest()
+	if err != nil {
+		return err
+	}
+	manifest, err := img.RawManifest()
+	if err != nil {
+		return err
+	}
+
+	return l.WriteBlob(d, io.NopCloser(bytes.NewReader(manifest)))
+}
+
+type withLayer interface {
+	Layer(v1.Hash) (v1.Layer, error)
+}
+
+type withBlob interface {
+	Blob(v1.Hash) (io.ReadCloser, error)
+}
+
+func (l Path) writeIndexToFile(indexFile string, ii v1.ImageIndex) error {
+	index, err := ii.IndexManifest()
+	if err != nil {
+		return err
+	}
+
+	// Walk the descriptors and write any v1.Image or v1.ImageIndex that we find.
+	// If we come across something we don't expect, just write it as a blob.
+	for _, desc := range index.Manifests {
+		switch desc.MediaType {
+		case types.OCIImageIndex, types.DockerManifestList:
+			ii, err := ii.ImageIndex(desc.Digest)
+			if err != nil {
+				return err
+			}
+			if err := l.WriteIndex(ii); err != nil {
+				return err
+			}
+		case types.OCIManifestSchema1, types.DockerManifestSchema2:
+			img, err := ii.Image(desc.Digest)
+			if err != nil {
+				return err
+			}
+			if err := l.WriteImage(img); err != nil {
+				return err
+			}
+		default:
+			// TODO: The layout could reference arbitrary things, which we should
+			// probably just pass through.
+
+			var blob io.ReadCloser
+			// Workaround for #819.
+			if wl, ok := ii.(withLayer); ok {
+				layer, lerr := wl.Layer(desc.Digest)
+				if lerr != nil {
+					return lerr
+				}
+				blob, err = layer.Compressed()
+			} else if wb, ok := ii.(withBlob); ok {
+				blob, err = wb.Blob(desc.Digest)
+			}
+			if err != nil {
+				return err
+			}
+			if err := l.WriteBlob(desc.Digest, blob); err != nil {
+				return err
+			}
+		}
+	}
+
+	rawIndex, err := ii.RawManifest()
+	if err != nil {
+		return err
+	}
+
+	return l.WriteFile(indexFile, rawIndex, os.ModePerm)
+}
+
+// WriteIndex writes an index to the blobs directory, walking down the tree of
+// child manifests and/or indexes until every config and layer has been
+// written. If any blob already exists, as determined by the hash filename, it
+// is not written again.
+// This function does *not* update the `index.json` file. If you want to write the
+// index and also update the `index.json`, call AppendIndex(), which wraps this
+// and also updates the `index.json`.
+func (l Path) WriteIndex(ii v1.ImageIndex) error {
+	// Always just write oci-layout file, since it's small.
+	if err := l.WriteFile("oci-layout", []byte(layoutFile), os.ModePerm); err != nil {
+		return err
+	}
+
+	h, err := ii.Digest()
+	if err != nil {
+		return err
+	}
+
+	indexFile := filepath.Join("blobs", h.Algorithm, h.Hex)
+	return l.writeIndexToFile(indexFile, ii)
+}
+
+// Write constructs a Path at path from an ImageIndex.
+//
+// The contents are written in the following format:
+// At the top level, there is:
+//
+// One oci-layout file containing the version of this image-layout.
+// One index.json file listing descriptors for the contained images.
+//
+// Under blobs/, there is, for each image:
+//
+// One file for each layer, named after the layer's SHA.
+// One file for each config blob, named after its SHA.
+// One file for each manifest blob, named after its SHA.
+func Write(path string, ii v1.ImageIndex) (Path, error) {
+	lp := Path(path)
+	// Always just write oci-layout file, since it's small.
+ if err := lp.WriteFile("oci-layout", []byte(layoutFile), os.ModePerm); err != nil { + return "", err + } + + // TODO create blobs/ in case there is a blobs file which would prevent the directory from being created + + return lp, lp.writeIndexToFile("index.json", ii) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go new file mode 100644 index 00000000..8ded1ee5 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go @@ -0,0 +1,68 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "io" + + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Manifest represents the OCI image manifest in a structured way. +type Manifest struct { + SchemaVersion int64 `json:"schemaVersion"` + MediaType types.MediaType `json:"mediaType,omitempty"` + Config Descriptor `json:"config"` + Layers []Descriptor `json:"layers"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// IndexManifest represents an OCI image index in a structured way. +type IndexManifest struct { + SchemaVersion int64 `json:"schemaVersion"` + MediaType types.MediaType `json:"mediaType,omitempty"` + Manifests []Descriptor `json:"manifests"` + Annotations map[string]string `json:"annotations,omitempty"` +} + +// Descriptor holds a reference from the manifest to one of its constituent elements. +type Descriptor struct { + MediaType types.MediaType `json:"mediaType"` + Size int64 `json:"size"` + Digest Hash `json:"digest"` + Data []byte `json:"data,omitempty"` + URLs []string `json:"urls,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Platform *Platform `json:"platform,omitempty"` +} + +// ParseManifest parses the io.Reader's contents into a Manifest. +func ParseManifest(r io.Reader) (*Manifest, error) { + m := Manifest{} + if err := json.NewDecoder(r).Decode(&m); err != nil { + return nil, err + } + return &m, nil +} + +// ParseIndexManifest parses the io.Reader's contents into an IndexManifest. +func ParseIndexManifest(r io.Reader) (*IndexManifest, error) { + im := IndexManifest{} + if err := json.NewDecoder(r).Decode(&im); err != nil { + return nil, err + } + return &im, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go b/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go new file mode 100644 index 00000000..98b1ff90 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go @@ -0,0 +1,92 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package match provides functionality for conveniently matching a v1.Descriptor. +package match + +import ( + v1 "github.com/google/go-containerregistry/pkg/v1" + imagespec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Matcher function that is given a v1.Descriptor, and returns whether or +// not it matches a given rule. Can match on anything it wants in the Descriptor. +type Matcher func(desc v1.Descriptor) bool + +// Name returns a match.Matcher that matches based on the value of the +// +// "org.opencontainers.image.ref.name" annotation: +// +// github.com/opencontainers/image-spec/blob/v1.0.1/annotations.md#pre-defined-annotation-keys +func Name(name string) Matcher { + return Annotation(imagespec.AnnotationRefName, name) +} + +// Annotation returns a match.Matcher that matches based on the provided annotation. +func Annotation(key, value string) Matcher { + return func(desc v1.Descriptor) bool { + if desc.Annotations == nil { + return false + } + if aValue, ok := desc.Annotations[key]; ok && aValue == value { + return true + } + return false + } +} + +// Platforms returns a match.Matcher that matches on any one of the provided platforms. +// Ignores any descriptors that do not have a platform. +func Platforms(platforms ...v1.Platform) Matcher { + return func(desc v1.Descriptor) bool { + if desc.Platform == nil { + return false + } + for _, platform := range platforms { + if desc.Platform.Equals(platform) { + return true + } + } + return false + } +} + +// MediaTypes returns a match.Matcher that matches at least one of the provided media types. +func MediaTypes(mediaTypes ...string) Matcher { + mts := map[string]bool{} + for _, media := range mediaTypes { + mts[media] = true + } + return func(desc v1.Descriptor) bool { + if desc.MediaType == "" { + return false + } + if _, ok := mts[string(desc.MediaType)]; ok { + return true + } + return false + } +} + +// Digests returns a match.Matcher that matches at least one of the provided Digests +func Digests(digests ...v1.Hash) Matcher { + digs := map[v1.Hash]bool{} + for _, digest := range digests { + digs[digest] = true + } + return func(desc v1.Descriptor) bool { + _, ok := digs[desc.Digest] + return ok + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md new file mode 100644 index 00000000..19e16124 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md @@ -0,0 +1,56 @@ +# `mutate` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/mutate) + +The `v1.Image`, `v1.ImageIndex`, and `v1.Layer` interfaces provide only +accessor methods, so they are essentially immutable. If you want to change +something about them, you need to produce a new instance of that interface. + +A common use case for this library is to read an image from somewhere (a source), +change something about it, and write the image somewhere else (a sink). 
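+
+For instance, here is a minimal sketch of that flow using the `tarball` and
+`mutate` packages (the file names and tag are illustrative):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/mutate"
+	"github.com/google/go-containerregistry/pkg/v1/tarball"
+)
+
+func main() {
+	// Source: read an image out of a local tarball.
+	img, err := tarball.ImageFromPath("app.tar", nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Mutate: append a layer. This produces a new v1.Image,
+	// leaving the original untouched.
+	layer, err := tarball.LayerFromFile("extra-layer.tar.gz")
+	if err != nil {
+		log.Fatal(err)
+	}
+	img, err = mutate.AppendLayers(img, layer)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Sink: write the mutated image to another tarball.
+	tag, err := name.NewTag("example.com/app:mutated")
+	if err != nil {
+		log.Fatal(err)
+	}
+	if err := tarball.WriteToFile("app-mutated.tar", tag, img); err != nil {
+		log.Fatal(err)
+	}
+}
+```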
+
+Graphically, this looks something like:
+
+    (source) --> v1.Image --> mutate --> v1.Image --> (sink)
+ +## Mutations + +This is obviously not a comprehensive set of useful transformations (PRs welcome!), +but a rough summary of what the `mutate` package currently does: + +### `Config` and `ConfigFile` + +These allow you to change the [image configuration](https://github.com/opencontainers/image-spec/blob/master/config.md#properties), +e.g. to change the entrypoint, environment, author, etc. + +### `Time`, `Canonical`, and `CreatedAt` + +These are useful in the context of [reproducible builds](https://reproducible-builds.org/), +where you may want to strip timestamps and other non-reproducible information. + +### `Append`, `AppendLayers`, and `AppendManifests` + +These functions allow the extension of a `v1.Image` or `v1.ImageIndex` with +new layers or manifests. + +For constructing an image `FROM scratch`, see the [`empty`](/pkg/v1/empty) package. + +### `MediaType` and `IndexMediaType` + +Sometimes, it is necessary to change the media type of an image or index, +e.g. to appease a registry with strict validation of images (_looking at you, GCR_). + +### `Rebase` + +Rebase has [its own README](/cmd/crane/rebase.md). + +This is the underlying implementation of [`crane rebase`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_rebase.md). + +### `Extract` + +Extract will flatten an image filesystem into a single tar stream, +respecting whiteout files. + +This is the underlying implementation of [`crane export`](https://github.com/google/go-containerregistry/blob/main/cmd/crane/doc/crane_export.md). diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go new file mode 100644 index 00000000..dfbd9951 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package mutate provides facilities for mutating v1.Images of any kind. +package mutate diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go new file mode 100644 index 00000000..93c230e3 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go @@ -0,0 +1,285 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mutate + +import ( + "bytes" + "encoding/json" + "errors" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type image struct { + base v1.Image + adds []Addendum + + computed bool + configFile *v1.ConfigFile + manifest *v1.Manifest + annotations map[string]string + mediaType *types.MediaType + configMediaType *types.MediaType + diffIDMap map[v1.Hash]v1.Layer + digestMap map[v1.Hash]v1.Layer +} + +var _ v1.Image = (*image)(nil) + +func (i *image) MediaType() (types.MediaType, error) { + if i.mediaType != nil { + return *i.mediaType, nil + } + return i.base.MediaType() +} + +func (i *image) compute() error { + // Don't re-compute if already computed. + if i.computed { + return nil + } + var configFile *v1.ConfigFile + if i.configFile != nil { + configFile = i.configFile + } else { + cf, err := i.base.ConfigFile() + if err != nil { + return err + } + configFile = cf.DeepCopy() + } + diffIDs := configFile.RootFS.DiffIDs + history := configFile.History + + diffIDMap := make(map[v1.Hash]v1.Layer) + digestMap := make(map[v1.Hash]v1.Layer) + + for _, add := range i.adds { + history = append(history, add.History) + if add.Layer != nil { + diffID, err := add.Layer.DiffID() + if err != nil { + return err + } + diffIDs = append(diffIDs, diffID) + diffIDMap[diffID] = add.Layer + } + } + + m, err := i.base.Manifest() + if err != nil { + return err + } + manifest := m.DeepCopy() + manifestLayers := manifest.Layers + for _, add := range i.adds { + if add.Layer == nil { + // Empty layers include only history in manifest. + continue + } + + desc, err := partial.Descriptor(add.Layer) + if err != nil { + return err + } + + // Fields in the addendum override the original descriptor. + if len(add.Annotations) != 0 { + desc.Annotations = add.Annotations + } + if len(add.URLs) != 0 { + desc.URLs = add.URLs + } + + if add.MediaType != "" { + desc.MediaType = add.MediaType + } + + manifestLayers = append(manifestLayers, *desc) + digestMap[desc.Digest] = add.Layer + } + + configFile.RootFS.DiffIDs = diffIDs + configFile.History = history + + manifest.Layers = manifestLayers + + rcfg, err := json.Marshal(configFile) + if err != nil { + return err + } + d, sz, err := v1.SHA256(bytes.NewBuffer(rcfg)) + if err != nil { + return err + } + manifest.Config.Digest = d + manifest.Config.Size = sz + + // If Data was set in the base image, we need to update it in the mutated image. + if m.Config.Data != nil { + manifest.Config.Data = rcfg + } + + // If the user wants to mutate the media type of the config + if i.configMediaType != nil { + manifest.Config.MediaType = *i.configMediaType + } + + if i.mediaType != nil { + manifest.MediaType = *i.mediaType + } + + if i.annotations != nil { + if manifest.Annotations == nil { + manifest.Annotations = map[string]string{} + } + + for k, v := range i.annotations { + manifest.Annotations[k] = v + } + } + + i.configFile = configFile + i.manifest = manifest + i.diffIDMap = diffIDMap + i.digestMap = digestMap + i.computed = true + return nil +} + +// Layers returns the ordered collection of filesystem layers that comprise this image. +// The order of the list is oldest/base layer first, and most-recent/top layer last. 
+func (i *image) Layers() ([]v1.Layer, error) { + if err := i.compute(); errors.Is(err, stream.ErrNotComputed) { + // Image contains a streamable layer which has not yet been + // consumed. Just return the layers we have in case the caller + // is going to consume the layers. + layers, err := i.base.Layers() + if err != nil { + return nil, err + } + for _, add := range i.adds { + layers = append(layers, add.Layer) + } + return layers, nil + } else if err != nil { + return nil, err + } + + diffIDs, err := partial.DiffIDs(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(diffIDs)) + for _, h := range diffIDs { + l, err := i.LayerByDiffID(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// ConfigName returns the hash of the image's config file. +func (i *image) ConfigName() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.ConfigName(i) +} + +// ConfigFile returns this image's config file. +func (i *image) ConfigFile() (*v1.ConfigFile, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.configFile.DeepCopy(), nil +} + +// RawConfigFile returns the serialized bytes of ConfigFile() +func (i *image) RawConfigFile() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.configFile) +} + +// Digest returns the sha256 of this image's manifest. +func (i *image) Digest() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.Digest(i) +} + +// Size implements v1.Image. +func (i *image) Size() (int64, error) { + if err := i.compute(); err != nil { + return -1, err + } + return partial.Size(i) +} + +// Manifest returns this image's Manifest object. +func (i *image) Manifest() (*v1.Manifest, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.manifest.DeepCopy(), nil +} + +// RawManifest returns the serialized bytes of Manifest() +func (i *image) RawManifest() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.manifest) +} + +// LayerByDigest returns a Layer for interacting with a particular layer of +// the image, looking it up by "digest" (the compressed hash). +func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) { + if cn, err := i.ConfigName(); err != nil { + return nil, err + } else if h == cn { + return partial.ConfigLayer(i) + } + if layer, ok := i.digestMap[h]; ok { + return layer, nil + } + return i.base.LayerByDigest(h) +} + +// LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" +// (the uncompressed hash). +func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { + if layer, ok := i.diffIDMap[h]; ok { + return layer, nil + } + return i.base.LayerByDiffID(h) +} + +func validate(adds []Addendum) error { + for _, add := range adds { + if add.Layer == nil && !add.History.EmptyLayer { + return errors.New("unable to add a nil layer to the image") + } + } + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go new file mode 100644 index 00000000..841995f1 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go @@ -0,0 +1,202 @@ +// Copyright 2019 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "encoding/json" + "fmt" + + "github.com/google/go-containerregistry/pkg/logs" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/match" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +func computeDescriptor(ia IndexAddendum) (*v1.Descriptor, error) { + desc, err := partial.Descriptor(ia.Add) + if err != nil { + return nil, err + } + + // The IndexAddendum allows overriding Descriptor values. + if ia.Descriptor.Size != 0 { + desc.Size = ia.Descriptor.Size + } + if string(ia.Descriptor.MediaType) != "" { + desc.MediaType = ia.Descriptor.MediaType + } + if ia.Descriptor.Digest != (v1.Hash{}) { + desc.Digest = ia.Descriptor.Digest + } + if ia.Descriptor.Platform != nil { + desc.Platform = ia.Descriptor.Platform + } + if len(ia.Descriptor.URLs) != 0 { + desc.URLs = ia.Descriptor.URLs + } + if len(ia.Descriptor.Annotations) != 0 { + desc.Annotations = ia.Descriptor.Annotations + } + if ia.Descriptor.Data != nil { + desc.Data = ia.Descriptor.Data + } + + return desc, nil +} + +type index struct { + base v1.ImageIndex + adds []IndexAddendum + // remove is removed before adds + remove match.Matcher + + computed bool + manifest *v1.IndexManifest + annotations map[string]string + mediaType *types.MediaType + imageMap map[v1.Hash]v1.Image + indexMap map[v1.Hash]v1.ImageIndex + layerMap map[v1.Hash]v1.Layer +} + +var _ v1.ImageIndex = (*index)(nil) + +func (i *index) MediaType() (types.MediaType, error) { + if i.mediaType != nil { + return *i.mediaType, nil + } + return i.base.MediaType() +} + +func (i *index) Size() (int64, error) { return partial.Size(i) } + +func (i *index) compute() error { + // Don't re-compute if already computed. 
+ if i.computed { + return nil + } + + i.imageMap = make(map[v1.Hash]v1.Image) + i.indexMap = make(map[v1.Hash]v1.ImageIndex) + i.layerMap = make(map[v1.Hash]v1.Layer) + + m, err := i.base.IndexManifest() + if err != nil { + return err + } + manifest := m.DeepCopy() + manifests := manifest.Manifests + + if i.remove != nil { + var cleanedManifests []v1.Descriptor + for _, m := range manifests { + if !i.remove(m) { + cleanedManifests = append(cleanedManifests, m) + } + } + manifests = cleanedManifests + } + + for _, add := range i.adds { + desc, err := computeDescriptor(add) + if err != nil { + return err + } + + manifests = append(manifests, *desc) + if idx, ok := add.Add.(v1.ImageIndex); ok { + i.indexMap[desc.Digest] = idx + } else if img, ok := add.Add.(v1.Image); ok { + i.imageMap[desc.Digest] = img + } else if l, ok := add.Add.(v1.Layer); ok { + i.layerMap[desc.Digest] = l + } else { + logs.Warn.Printf("Unexpected index addendum: %T", add.Add) + } + } + + manifest.Manifests = manifests + + if i.mediaType != nil { + manifest.MediaType = *i.mediaType + } + + if i.annotations != nil { + if manifest.Annotations == nil { + manifest.Annotations = map[string]string{} + } + for k, v := range i.annotations { + manifest.Annotations[k] = v + } + } + + i.manifest = manifest + i.computed = true + return nil +} + +func (i *index) Image(h v1.Hash) (v1.Image, error) { + if img, ok := i.imageMap[h]; ok { + return img, nil + } + return i.base.Image(h) +} + +func (i *index) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + if idx, ok := i.indexMap[h]; ok { + return idx, nil + } + return i.base.ImageIndex(h) +} + +type withLayer interface { + Layer(v1.Hash) (v1.Layer, error) +} + +// Workaround for #819. +func (i *index) Layer(h v1.Hash) (v1.Layer, error) { + if layer, ok := i.layerMap[h]; ok { + return layer, nil + } + if wl, ok := i.base.(withLayer); ok { + return wl.Layer(h) + } + return nil, fmt.Errorf("layer not found: %s", h) +} + +// Digest returns the sha256 of this image's manifest. +func (i *index) Digest() (v1.Hash, error) { + if err := i.compute(); err != nil { + return v1.Hash{}, err + } + return partial.Digest(i) +} + +// Manifest returns this image's Manifest object. +func (i *index) IndexManifest() (*v1.IndexManifest, error) { + if err := i.compute(); err != nil { + return nil, err + } + return i.manifest.DeepCopy(), nil +} + +// RawManifest returns the serialized bytes of Manifest() +func (i *index) RawManifest() ([]byte, error) { + if err := i.compute(); err != nil { + return nil, err + } + return json.Marshal(i.manifest) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go new file mode 100644 index 00000000..022fc818 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go @@ -0,0 +1,516 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mutate + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "path/filepath" + "strings" + "time" + + "github.com/google/go-containerregistry/internal/gzip" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/match" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +const whiteoutPrefix = ".wh." + +// Addendum contains layers and history to be appended +// to a base image +type Addendum struct { + Layer v1.Layer + History v1.History + URLs []string + Annotations map[string]string + MediaType types.MediaType +} + +// AppendLayers applies layers to a base image. +func AppendLayers(base v1.Image, layers ...v1.Layer) (v1.Image, error) { + additions := make([]Addendum, 0, len(layers)) + for _, layer := range layers { + additions = append(additions, Addendum{Layer: layer}) + } + + return Append(base, additions...) +} + +// Append will apply the list of addendums to the base image +func Append(base v1.Image, adds ...Addendum) (v1.Image, error) { + if len(adds) == 0 { + return base, nil + } + if err := validate(adds); err != nil { + return nil, err + } + + return &image{ + base: base, + adds: adds, + }, nil +} + +// Appendable is an interface that represents something that can be appended +// to an ImageIndex. We need to be able to construct a v1.Descriptor in order +// to append something, and this is the minimum required information for that. +type Appendable interface { + MediaType() (types.MediaType, error) + Digest() (v1.Hash, error) + Size() (int64, error) +} + +// IndexAddendum represents an appendable thing and all the properties that +// we may want to override in the resulting v1.Descriptor. +type IndexAddendum struct { + Add Appendable + v1.Descriptor +} + +// AppendManifests appends a manifest to the ImageIndex. +func AppendManifests(base v1.ImageIndex, adds ...IndexAddendum) v1.ImageIndex { + return &index{ + base: base, + adds: adds, + } +} + +// RemoveManifests removes any descriptors that match the match.Matcher. +func RemoveManifests(base v1.ImageIndex, matcher match.Matcher) v1.ImageIndex { + return &index{ + base: base, + remove: matcher, + } +} + +// Config mutates the provided v1.Image to have the provided v1.Config +func Config(base v1.Image, cfg v1.Config) (v1.Image, error) { + cf, err := base.ConfigFile() + if err != nil { + return nil, err + } + + cf.Config = cfg + + return ConfigFile(base, cf) +} + +// Annotatable represents a manifest that can carry annotations. +type Annotatable interface { + partial.WithRawManifest +} + +// Annotations mutates the annotations on an annotatable image or index manifest. +// +// The annotatable input is expected to be a v1.Image or v1.ImageIndex, and +// returns the same type. You can type-assert the result like so: +// +// img := Annotations(empty.Image, map[string]string{ +// "foo": "bar", +// }).(v1.Image) +// +// Or for an index: +// +// idx := Annotations(empty.Index, map[string]string{ +// "foo": "bar", +// }).(v1.ImageIndex) +// +// If the input Annotatable is not an Image or ImageIndex, the result will +// attempt to lazily annotate the raw manifest. 
+func Annotations(f Annotatable, anns map[string]string) Annotatable { + if img, ok := f.(v1.Image); ok { + return &image{ + base: img, + annotations: anns, + } + } + if idx, ok := f.(v1.ImageIndex); ok { + return &index{ + base: idx, + annotations: anns, + } + } + return arbitraryRawManifest{f, anns} +} + +type arbitraryRawManifest struct { + a Annotatable + anns map[string]string +} + +func (a arbitraryRawManifest) RawManifest() ([]byte, error) { + b, err := a.a.RawManifest() + if err != nil { + return nil, err + } + var m map[string]any + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + if ann, ok := m["annotations"]; ok { + if annm, ok := ann.(map[string]string); ok { + for k, v := range a.anns { + annm[k] = v + } + } else { + return nil, fmt.Errorf(".annotations is not a map: %T", ann) + } + } else { + m["annotations"] = a.anns + } + return json.Marshal(m) +} + +// ConfigFile mutates the provided v1.Image to have the provided v1.ConfigFile +func ConfigFile(base v1.Image, cfg *v1.ConfigFile) (v1.Image, error) { + m, err := base.Manifest() + if err != nil { + return nil, err + } + + image := &image{ + base: base, + manifest: m.DeepCopy(), + configFile: cfg, + } + + return image, nil +} + +// CreatedAt mutates the provided v1.Image to have the provided v1.Time +func CreatedAt(base v1.Image, created v1.Time) (v1.Image, error) { + cf, err := base.ConfigFile() + if err != nil { + return nil, err + } + + cfg := cf.DeepCopy() + cfg.Created = created + + return ConfigFile(base, cfg) +} + +// Extract takes an image and returns an io.ReadCloser containing the image's +// flattened filesystem. +// +// Callers can read the filesystem contents by passing the reader to +// tar.NewReader, or io.Copy it directly to some output. +// +// If a caller doesn't read the full contents, they should Close it to free up +// resources used during extraction. +func Extract(img v1.Image) io.ReadCloser { + pr, pw := io.Pipe() + + go func() { + // Close the writer with any errors encountered during + // extraction. These errors will be returned by the reader end + // on subsequent reads. If err == nil, the reader will return + // EOF. + pw.CloseWithError(extract(img, pw)) + }() + + return pr +} + +// Adapted from https://github.com/google/containerregistry/blob/da03b395ccdc4e149e34fbb540483efce962dc64/client/v2_2/docker_image_.py#L816 +func extract(img v1.Image, w io.Writer) error { + tarWriter := tar.NewWriter(w) + defer tarWriter.Close() + + fileMap := map[string]bool{} + + layers, err := img.Layers() + if err != nil { + return fmt.Errorf("retrieving image layers: %w", err) + } + + // we iterate through the layers in reverse order because it makes handling + // whiteout layers more efficient, since we can just keep track of the removed + // files as we see .wh. layers and ignore those in previous layers. + for i := len(layers) - 1; i >= 0; i-- { + layer := layers[i] + layerReader, err := layer.Uncompressed() + if err != nil { + return fmt.Errorf("reading layer contents: %w", err) + } + defer layerReader.Close() + tarReader := tar.NewReader(layerReader) + for { + header, err := tarReader.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return fmt.Errorf("reading tar: %w", err) + } + + // Some tools prepend everything with "./", so if we don't Clean the + // name, we may have duplicate entries, which angers tar-split. 
+ header.Name = filepath.Clean(header.Name) + // force PAX format to remove Name/Linkname length limit of 100 characters + // required by USTAR and to not depend on internal tar package guess which + // prefers USTAR over PAX + header.Format = tar.FormatPAX + + basename := filepath.Base(header.Name) + dirname := filepath.Dir(header.Name) + tombstone := strings.HasPrefix(basename, whiteoutPrefix) + if tombstone { + basename = basename[len(whiteoutPrefix):] + } + + // check if we have seen value before + // if we're checking a directory, don't filepath.Join names + var name string + if header.Typeflag == tar.TypeDir { + name = header.Name + } else { + name = filepath.Join(dirname, basename) + } + + if _, ok := fileMap[name]; ok { + continue + } + + // check for a whited out parent directory + if inWhiteoutDir(fileMap, name) { + continue + } + + // mark file as handled. non-directory implicitly tombstones + // any entries with a matching (or child) name + fileMap[name] = tombstone || !(header.Typeflag == tar.TypeDir) + if !tombstone { + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if header.Size > 0 { + if _, err := io.CopyN(tarWriter, tarReader, header.Size); err != nil { + return err + } + } + } + } + } + return nil +} + +func inWhiteoutDir(fileMap map[string]bool, file string) bool { + for { + if file == "" { + break + } + dirname := filepath.Dir(file) + if file == dirname { + break + } + if val, ok := fileMap[dirname]; ok && val { + return true + } + file = dirname + } + return false +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +// Time sets all timestamps in an image to the given timestamp. +func Time(img v1.Image, t time.Time) (v1.Image, error) { + newImage := empty.Image + + layers, err := img.Layers() + if err != nil { + return nil, fmt.Errorf("getting image layers: %w", err) + } + + ocf, err := img.ConfigFile() + if err != nil { + return nil, fmt.Errorf("getting original config file: %w", err) + } + + addendums := make([]Addendum, max(len(ocf.History), len(layers))) + var historyIdx, addendumIdx int + for layerIdx := 0; layerIdx < len(layers); addendumIdx, layerIdx = addendumIdx+1, layerIdx+1 { + newLayer, err := layerTime(layers[layerIdx], t) + if err != nil { + return nil, fmt.Errorf("setting layer times: %w", err) + } + + // try to search for the history entry that corresponds to this layer + for ; historyIdx < len(ocf.History); historyIdx++ { + addendums[addendumIdx].History = ocf.History[historyIdx] + // if it's an EmptyLayer, do not set the Layer and have the Addendum with just the History + // and move on to the next History entry + if ocf.History[historyIdx].EmptyLayer { + addendumIdx++ + continue + } + // otherwise, we can exit from the cycle + historyIdx++ + break + } + addendums[addendumIdx].Layer = newLayer + } + + // add all leftover History entries + for ; historyIdx < len(ocf.History); historyIdx, addendumIdx = historyIdx+1, addendumIdx+1 { + addendums[addendumIdx].History = ocf.History[historyIdx] + } + + newImage, err = Append(newImage, addendums...) 
+ if err != nil { + return nil, fmt.Errorf("appending layers: %w", err) + } + + cf, err := newImage.ConfigFile() + if err != nil { + return nil, fmt.Errorf("setting config file: %w", err) + } + + cfg := cf.DeepCopy() + + // Copy basic config over + cfg.Architecture = ocf.Architecture + cfg.OS = ocf.OS + cfg.OSVersion = ocf.OSVersion + cfg.Config = ocf.Config + + // Strip away timestamps from the config file + cfg.Created = v1.Time{Time: t} + + for i, h := range cfg.History { + h.Created = v1.Time{Time: t} + h.CreatedBy = ocf.History[i].CreatedBy + h.Comment = ocf.History[i].Comment + h.EmptyLayer = ocf.History[i].EmptyLayer + // Explicitly ignore Author field; which hinders reproducibility + h.Author = "" + cfg.History[i] = h + } + + return ConfigFile(newImage, cfg) +} + +func layerTime(layer v1.Layer, t time.Time) (v1.Layer, error) { + layerReader, err := layer.Uncompressed() + if err != nil { + return nil, fmt.Errorf("getting layer: %w", err) + } + defer layerReader.Close() + w := new(bytes.Buffer) + tarWriter := tar.NewWriter(w) + defer tarWriter.Close() + + tarReader := tar.NewReader(layerReader) + for { + header, err := tarReader.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, fmt.Errorf("reading layer: %w", err) + } + + header.ModTime = t + if err := tarWriter.WriteHeader(header); err != nil { + return nil, fmt.Errorf("writing tar header: %w", err) + } + + if header.Typeflag == tar.TypeReg { + // TODO(#1168): This should be lazy, and not buffer the entire layer contents. + if _, err = io.CopyN(tarWriter, tarReader, header.Size); err != nil { + return nil, fmt.Errorf("writing layer file: %w", err) + } + } + } + + if err := tarWriter.Close(); err != nil { + return nil, err + } + + b := w.Bytes() + // gzip the contents, then create the layer + opener := func() (io.ReadCloser, error) { + return gzip.ReadCloser(io.NopCloser(bytes.NewReader(b))), nil + } + layer, err = tarball.LayerFromOpener(opener) + if err != nil { + return nil, fmt.Errorf("creating layer: %w", err) + } + + return layer, nil +} + +// Canonical is a helper function to combine Time and configFile +// to remove any randomness during a docker build. +func Canonical(img v1.Image) (v1.Image, error) { + // Set all timestamps to 0 + created := time.Time{} + img, err := Time(img, created) + if err != nil { + return nil, err + } + + cf, err := img.ConfigFile() + if err != nil { + return nil, err + } + + // Get rid of host-dependent random config + cfg := cf.DeepCopy() + + cfg.Container = "" + cfg.Config.Hostname = "" + cfg.DockerVersion = "" + + return ConfigFile(img, cfg) +} + +// MediaType modifies the MediaType() of the given image. +func MediaType(img v1.Image, mt types.MediaType) v1.Image { + return &image{ + base: img, + mediaType: &mt, + } +} + +// ConfigMediaType modifies the MediaType() of the given image's Config. +func ConfigMediaType(img v1.Image, mt types.MediaType) v1.Image { + return &image{ + base: img, + configMediaType: &mt, + } +} + +// IndexMediaType modifies the MediaType() of the given index. +func IndexMediaType(idx v1.ImageIndex, mt types.MediaType) v1.ImageIndex { + return &index{ + base: idx, + mediaType: &mt, + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go new file mode 100644 index 00000000..c606e0b7 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go @@ -0,0 +1,144 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" +) + +// Rebase returns a new v1.Image where the oldBase in orig is replaced by newBase. +func Rebase(orig, oldBase, newBase v1.Image) (v1.Image, error) { + // Verify that oldBase's layers are present in orig, otherwise orig is + // not based on oldBase at all. + origLayers, err := orig.Layers() + if err != nil { + return nil, fmt.Errorf("failed to get layers for original: %w", err) + } + oldBaseLayers, err := oldBase.Layers() + if err != nil { + return nil, err + } + if len(oldBaseLayers) > len(origLayers) { + return nil, fmt.Errorf("image %q is not based on %q (too few layers)", orig, oldBase) + } + for i, l := range oldBaseLayers { + oldLayerDigest, err := l.Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest of layer %d of %q: %w", i, oldBase, err) + } + origLayerDigest, err := origLayers[i].Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest of layer %d of %q: %w", i, orig, err) + } + if oldLayerDigest != origLayerDigest { + return nil, fmt.Errorf("image %q is not based on %q (layer %d mismatch)", orig, oldBase, i) + } + } + + oldConfig, err := oldBase.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config for old base: %w", err) + } + + origConfig, err := orig.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config for original: %w", err) + } + + newConfig, err := newBase.ConfigFile() + if err != nil { + return nil, fmt.Errorf("could not get config for new base: %w", err) + } + + // Stitch together an image that contains: + // - original image's config + // - new base image's os/arch properties + // - new base image's layers + top of original image's layers + // - new base image's history + top of original image's history + rebasedImage, err := Config(empty.Image, *origConfig.Config.DeepCopy()) + if err != nil { + return nil, fmt.Errorf("failed to create empty image with original config: %w", err) + } + + // Add new config properties from existing images. + rebasedConfig, err := rebasedImage.ConfigFile() + if err != nil { + return nil, fmt.Errorf("could not get config for rebased image: %w", err) + } + // OS/Arch properties from new base + rebasedConfig.Architecture = newConfig.Architecture + rebasedConfig.OS = newConfig.OS + rebasedConfig.OSVersion = newConfig.OSVersion + + // Apply config properties to rebased. + rebasedImage, err = ConfigFile(rebasedImage, rebasedConfig) + if err != nil { + return nil, fmt.Errorf("failed to replace config for rebased image: %w", err) + } + + // Get new base layers and config for history. + newBaseLayers, err := newBase.Layers() + if err != nil { + return nil, fmt.Errorf("could not get new base layers for new base: %w", err) + } + // Add new base layers. + rebasedImage, err = Append(rebasedImage, createAddendums(0, 0, newConfig.History, newBaseLayers)...) 
+	if err != nil {
+		return nil, fmt.Errorf("failed to append new base image: %w", err)
+	}
+
+	// Add original layers above the old base.
+	rebasedImage, err = Append(rebasedImage, createAddendums(len(oldConfig.History), len(oldBaseLayers)+1, origConfig.History, origLayers)...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to append original image: %w", err)
+	}
+
+	return rebasedImage, nil
+}
+
+// createAddendums makes a list of addendums from a history and layers starting
+// from specific history and layer indexes.
+func createAddendums(startHistory, startLayer int, history []v1.History, layers []v1.Layer) []Addendum {
+	var adds []Addendum
+	// History should be a superset of layers; empty layers (e.g. ENV statements) only exist in history.
+	// They cannot be iterated identically but must be walked independently, only advancing the iterator for layers
+	// when a history entry for a non-empty layer is seen.
+	layerIndex := 0
+	for historyIndex := range history {
+		var layer v1.Layer
+		emptyLayer := history[historyIndex].EmptyLayer
+		if !emptyLayer {
+			layer = layers[layerIndex]
+			layerIndex++
+		}
+		if historyIndex >= startHistory || layerIndex >= startLayer {
+			adds = append(adds, Addendum{
+				Layer:   layer,
+				History: history[historyIndex],
+			})
+		}
+	}
+	// In the event history was malformed or non-existent, append the remaining layers.
+	for i := layerIndex; i < len(layers); i++ {
+		if i >= startLayer {
+			adds = append(adds, Addendum{Layer: layers[i]})
+		}
+	}
+
+	return adds
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md
new file mode 100644
index 00000000..53ebbc6c
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md
@@ -0,0 +1,82 @@
+# `partial`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial)
+
+## Partial Implementations
+
+There are roughly two kinds of image representations: compressed and uncompressed.
+
+The implementations for these kinds of images are almost identical, with the only
+major difference being how blobs (config and layers) are fetched. This common
+code lives in this package, where you provide a _partial_ implementation of a
+compressed or uncompressed image, and you get back a full `v1.Image` implementation.
+
+### Examples
+
+In a registry, blobs are compressed, so it's easiest to implement a `v1.Image` in terms
+of compressed layers. `remote.remoteImage` does this by implementing `CompressedImageCore`:
+
+```go
+type CompressedImageCore interface {
+	RawConfigFile() ([]byte, error)
+	MediaType() (types.MediaType, error)
+	RawManifest() ([]byte, error)
+	LayerByDigest(v1.Hash) (CompressedLayer, error)
+}
+```
+
+In a tarball, blobs are (often) uncompressed, so it's easiest to implement a `v1.Image` in terms
+of uncompressed layers. `tarball.uncompressedImage` does this by implementing `UncompressedImageCore`:
+
+```go
+type UncompressedImageCore interface {
+	RawConfigFile() ([]byte, error)
+	MediaType() (types.MediaType, error)
+	LayerByDiffID(v1.Hash) (UncompressedLayer, error)
+}
+```
+
+## Optional Methods
+
+Where possible, we access some information via optional methods as an optimization.
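+
+For example, here is a short sketch of the consumer side of one such optional
+method, `partial.UncompressedSize` (described below); the layer file path is
+illustrative:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/tarball"
+)
+
+func main() {
+	// Open a compressed layer from disk.
+	layer, err := tarball.LayerFromFile("layer.tar.gz")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// If the underlying implementation exposes the optional
+	// UncompressedSize method, this takes the fast path; otherwise it
+	// falls back to reading the uncompressed stream to measure it.
+	size, err := partial.UncompressedSize(layer)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("uncompressed layer size: %d bytes\n", size)
+}
+```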
+
+### [`partial.Descriptor`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Descriptor)
+
+There are some properties of a [`Descriptor`](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) that aren't derivable from just image data:
+
+* `MediaType`
+* `Platform`
+* `URLs`
+* `Annotations`
+
+For example, in a `tarball.Image`, there is a `LayerSources` field that contains
+an entire layer descriptor with `URLs` information for foreign layers. This
+information can be passed through to callers by implementing this optional
+`Descriptor` method.
+
+See [`#654`](https://github.com/google/go-containerregistry/pull/654).
+
+### [`partial.UncompressedSize`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#UncompressedSize)
+
+Usually, you don't need to know the uncompressed size of a layer, since that
+information isn't stored in a config file (just the sha256 is needed); however,
+there are cases where it is very helpful to know the layer size, e.g. when
+writing the uncompressed layer into a tarball.
+
+See [`#655`](https://github.com/google/go-containerregistry/pull/655).
+
+### [`partial.Exists`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/partial#Exists)
+
+We generally don't care about the existence of something as granular as a
+layer, and would rather ensure all the invariants of an image are upheld via
+the `validate` package. However, there are situations where we want to do a
+quick smoke test to ensure that the underlying storage engine hasn't been
+corrupted by something, e.g. deleting files or blobs. Thus, we've exposed an
+optional `Exists` method that does an existence check without actually reading
+any bytes.
+
+The `remote` package implements this via `HEAD` requests.
+
+The `layout` package implements this via `os.Stat`.
+
+See [`#838`](https://github.com/google/go-containerregistry/pull/838).
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go
new file mode 100644
index 00000000..44989ac9
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go
@@ -0,0 +1,188 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+	"io"
+
+	"github.com/google/go-containerregistry/internal/and"
+	"github.com/google/go-containerregistry/internal/compression"
+	"github.com/google/go-containerregistry/internal/gzip"
+	"github.com/google/go-containerregistry/internal/zstd"
+	comp "github.com/google/go-containerregistry/pkg/compression"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// CompressedLayer represents the bare minimum interface a natively
+// compressed layer must implement for us to produce a v1.Layer
+type CompressedLayer interface {
+	// Digest returns the Hash of the compressed layer.
+ Digest() (v1.Hash, error) + + // Compressed returns an io.ReadCloser for the compressed layer contents. + Compressed() (io.ReadCloser, error) + + // Size returns the compressed size of the Layer. + Size() (int64, error) + + // Returns the mediaType for the compressed Layer + MediaType() (types.MediaType, error) +} + +// compressedLayerExtender implements v1.Image using the compressed base properties. +type compressedLayerExtender struct { + CompressedLayer +} + +// Uncompressed implements v1.Layer +func (cle *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) { + rc, err := cle.Compressed() + if err != nil { + return nil, err + } + + // Often, the "compressed" bytes are not actually-compressed. + // Peek at the first two bytes to determine whether it's correct to + // wrap this with gzip.UnzipReadCloser or zstd.UnzipReadCloser. + cp, pr, err := compression.PeekCompression(rc) + if err != nil { + return nil, err + } + + prc := &and.ReadCloser{ + Reader: pr, + CloseFunc: rc.Close, + } + + switch cp { + case comp.GZip: + return gzip.UnzipReadCloser(prc) + case comp.ZStd: + return zstd.UnzipReadCloser(prc) + default: + return prc, nil + } +} + +// DiffID implements v1.Layer +func (cle *compressedLayerExtender) DiffID() (v1.Hash, error) { + // If our nested CompressedLayer implements DiffID, + // then delegate to it instead. + if wdi, ok := cle.CompressedLayer.(WithDiffID); ok { + return wdi.DiffID() + } + r, err := cle.Uncompressed() + if err != nil { + return v1.Hash{}, err + } + defer r.Close() + h, _, err := v1.SHA256(r) + return h, err +} + +// CompressedToLayer fills in the missing methods from a CompressedLayer so that it implements v1.Layer +func CompressedToLayer(ul CompressedLayer) (v1.Layer, error) { + return &compressedLayerExtender{ul}, nil +} + +// CompressedImageCore represents the base minimum interface a natively +// compressed image must implement for us to produce a v1.Image. +type CompressedImageCore interface { + ImageCore + + // RawManifest returns the serialized bytes of the manifest. + RawManifest() ([]byte, error) + + // LayerByDigest is a variation on the v1.Image method, which returns + // a CompressedLayer instead. + LayerByDigest(v1.Hash) (CompressedLayer, error) +} + +// compressedImageExtender implements v1.Image by extending CompressedImageCore with the +// appropriate methods computed from the minimal core. 
+type compressedImageExtender struct { + CompressedImageCore +} + +// Assert that our extender type completes the v1.Image interface +var _ v1.Image = (*compressedImageExtender)(nil) + +// Digest implements v1.Image +func (i *compressedImageExtender) Digest() (v1.Hash, error) { + return Digest(i) +} + +// ConfigName implements v1.Image +func (i *compressedImageExtender) ConfigName() (v1.Hash, error) { + return ConfigName(i) +} + +// Layers implements v1.Image +func (i *compressedImageExtender) Layers() ([]v1.Layer, error) { + hs, err := FSLayers(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(hs)) + for _, h := range hs { + l, err := i.LayerByDigest(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// LayerByDigest implements v1.Image +func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { + cl, err := i.CompressedImageCore.LayerByDigest(h) + if err != nil { + return nil, err + } + return CompressedToLayer(cl) +} + +// LayerByDiffID implements v1.Image +func (i *compressedImageExtender) LayerByDiffID(h v1.Hash) (v1.Layer, error) { + h, err := DiffIDToBlob(i, h) + if err != nil { + return nil, err + } + return i.LayerByDigest(h) +} + +// ConfigFile implements v1.Image +func (i *compressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { + return ConfigFile(i) +} + +// Manifest implements v1.Image +func (i *compressedImageExtender) Manifest() (*v1.Manifest, error) { + return Manifest(i) +} + +// Size implements v1.Image +func (i *compressedImageExtender) Size() (int64, error) { + return Size(i) +} + +// CompressedToImage fills in the missing methods from a CompressedImageCore so that it implements v1.Image +func CompressedToImage(cic CompressedImageCore) (v1.Image, error) { + return &compressedImageExtender{ + CompressedImageCore: cic, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go new file mode 100644 index 00000000..153dfe4d --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package partial defines methods for building up a v1.Image from +// minimal subsets that are sufficient for defining a v1.Image. +package partial diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go new file mode 100644 index 00000000..c65f45e0 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go @@ -0,0 +1,28 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// ImageCore is the core set of properties without which we cannot build a v1.Image
+type ImageCore interface {
+	// RawConfigFile returns the serialized bytes of this image's config file.
+	RawConfigFile() ([]byte, error)
+
+	// MediaType of this image's manifest.
+	MediaType() (types.MediaType, error)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go
new file mode 100644
index 00000000..f17f2744
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go
@@ -0,0 +1,85 @@
+// Copyright 2020 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+	"fmt"
+
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/match"
+)
+
+// FindManifests given a v1.ImageIndex, find the manifests that fit the matcher.
+func FindManifests(index v1.ImageIndex, matcher match.Matcher) ([]v1.Descriptor, error) {
+	// get the actual manifest list
+	indexManifest, err := index.IndexManifest()
+	if err != nil {
+		return nil, fmt.Errorf("unable to get raw index: %w", err)
+	}
+	manifests := []v1.Descriptor{}
+	// try to get the root of our image
+	for _, manifest := range indexManifest.Manifests {
+		if matcher(manifest) {
+			manifests = append(manifests, manifest)
+		}
+	}
+	return manifests, nil
+}
+
+// FindImages given a v1.ImageIndex, find the images that fit the matcher. If a Descriptor
+// matches the provided Matcher but the referenced item is not an Image, it is ignored.
+// Only returns those that match the Matcher and are images.
+func FindImages(index v1.ImageIndex, matcher match.Matcher) ([]v1.Image, error) {
+	matches := []v1.Image{}
+	manifests, err := FindManifests(index, matcher)
+	if err != nil {
+		return nil, err
+	}
+	for _, desc := range manifests {
+		// if it is not an image, ignore it
+		if !desc.MediaType.IsImage() {
+			continue
+		}
+		img, err := index.Image(desc.Digest)
+		if err != nil {
+			return nil, err
+		}
+		matches = append(matches, img)
+	}
+	return matches, nil
+}
+
+// FindIndexes given a v1.ImageIndex, find the indexes that fit the matcher. If a Descriptor
+// matches the provided Matcher but the referenced item is not an Index, it is ignored.
+// Only returns those that match the Matcher and are indexes.
+func FindIndexes(index v1.ImageIndex, matcher match.Matcher) ([]v1.ImageIndex, error) {
+	matches := []v1.ImageIndex{}
+	manifests, err := FindManifests(index, matcher)
+	if err != nil {
+		return nil, err
+	}
+	for _, desc := range manifests {
+		// if it is not an index, ignore it
+		if !desc.MediaType.IsIndex() {
+			continue
+		}
+		idx, err := index.ImageIndex(desc.Digest)
+		if err != nil {
+			return nil, err
+		}
+		matches = append(matches, idx)
+	}
+	return matches, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go
new file mode 100644
index 00000000..df20d3aa
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go
@@ -0,0 +1,223 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package partial
+
+import (
+	"bytes"
+	"io"
+	"sync"
+
+	"github.com/google/go-containerregistry/internal/gzip"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// UncompressedLayer represents the bare minimum interface a natively
+// uncompressed layer must implement for us to produce a v1.Layer
+type UncompressedLayer interface {
+	// DiffID returns the Hash of the uncompressed layer.
+	DiffID() (v1.Hash, error)
+
+	// Uncompressed returns an io.ReadCloser for the uncompressed layer contents.
+	Uncompressed() (io.ReadCloser, error)
+
+	// Returns the mediaType for the compressed Layer
+	MediaType() (types.MediaType, error)
+}
+
+// uncompressedLayerExtender implements v1.Layer using the uncompressed base properties.
+type uncompressedLayerExtender struct {
+	UncompressedLayer
+	// Memoize size/hash so that the methods aren't twice as
+	// expensive as doing this manually.
+ hash v1.Hash + size int64 + hashSizeError error + once sync.Once +} + +// Compressed implements v1.Layer +func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) { + u, err := ule.Uncompressed() + if err != nil { + return nil, err + } + return gzip.ReadCloser(u), nil +} + +// Digest implements v1.Layer +func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) { + ule.calcSizeHash() + return ule.hash, ule.hashSizeError +} + +// Size implements v1.Layer +func (ule *uncompressedLayerExtender) Size() (int64, error) { + ule.calcSizeHash() + return ule.size, ule.hashSizeError +} + +func (ule *uncompressedLayerExtender) calcSizeHash() { + ule.once.Do(func() { + var r io.ReadCloser + r, ule.hashSizeError = ule.Compressed() + if ule.hashSizeError != nil { + return + } + defer r.Close() + ule.hash, ule.size, ule.hashSizeError = v1.SHA256(r) + }) +} + +// UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer +func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) { + return &uncompressedLayerExtender{UncompressedLayer: ul}, nil +} + +// UncompressedImageCore represents the bare minimum interface a natively +// uncompressed image must implement for us to produce a v1.Image +type UncompressedImageCore interface { + ImageCore + + // LayerByDiffID is a variation on the v1.Image method, which returns + // an UncompressedLayer instead. + LayerByDiffID(v1.Hash) (UncompressedLayer, error) +} + +// UncompressedToImage fills in the missing methods from an UncompressedImageCore so that it implements v1.Image. +func UncompressedToImage(uic UncompressedImageCore) (v1.Image, error) { + return &uncompressedImageExtender{ + UncompressedImageCore: uic, + }, nil +} + +// uncompressedImageExtender implements v1.Image by extending UncompressedImageCore with the +// appropriate methods computed from the minimal core. 
+type uncompressedImageExtender struct { + UncompressedImageCore + + lock sync.Mutex + manifest *v1.Manifest +} + +// Assert that our extender type completes the v1.Image interface +var _ v1.Image = (*uncompressedImageExtender)(nil) + +// Digest implements v1.Image +func (i *uncompressedImageExtender) Digest() (v1.Hash, error) { + return Digest(i) +} + +// Manifest implements v1.Image +func (i *uncompressedImageExtender) Manifest() (*v1.Manifest, error) { + i.lock.Lock() + defer i.lock.Unlock() + if i.manifest != nil { + return i.manifest, nil + } + + b, err := i.RawConfigFile() + if err != nil { + return nil, err + } + + cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + m := &v1.Manifest{ + SchemaVersion: 2, + MediaType: types.DockerManifestSchema2, + Config: v1.Descriptor{ + MediaType: types.DockerConfigJSON, + Size: cfgSize, + Digest: cfgHash, + }, + } + + ls, err := i.Layers() + if err != nil { + return nil, err + } + + m.Layers = make([]v1.Descriptor, len(ls)) + for i, l := range ls { + desc, err := Descriptor(l) + if err != nil { + return nil, err + } + + m.Layers[i] = *desc + } + + i.manifest = m + return i.manifest, nil +} + +// RawManifest implements v1.Image +func (i *uncompressedImageExtender) RawManifest() ([]byte, error) { + return RawManifest(i) +} + +// Size implements v1.Image +func (i *uncompressedImageExtender) Size() (int64, error) { + return Size(i) +} + +// ConfigName implements v1.Image +func (i *uncompressedImageExtender) ConfigName() (v1.Hash, error) { + return ConfigName(i) +} + +// ConfigFile implements v1.Image +func (i *uncompressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { + return ConfigFile(i) +} + +// Layers implements v1.Image +func (i *uncompressedImageExtender) Layers() ([]v1.Layer, error) { + diffIDs, err := DiffIDs(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(diffIDs)) + for _, h := range diffIDs { + l, err := i.LayerByDiffID(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// LayerByDiffID implements v1.Image +func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, error) { + ul, err := i.UncompressedImageCore.LayerByDiffID(diffID) + if err != nil { + return nil, err + } + return UncompressedToLayer(ul) +} + +// LayerByDigest implements v1.Image +func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { + diffID, err := BlobToDiffID(i, h) + if err != nil { + return nil, err + } + return i.LayerByDiffID(diffID) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go new file mode 100644 index 00000000..3ad49928 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go @@ -0,0 +1,401 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package partial
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// WithRawConfigFile defines the subset of v1.Image used by these helper methods
+type WithRawConfigFile interface {
+	// RawConfigFile returns the serialized bytes of this image's config file.
+	RawConfigFile() ([]byte, error)
+}
+
+// ConfigFile is a helper for implementing v1.Image
+func ConfigFile(i WithRawConfigFile) (*v1.ConfigFile, error) {
+	b, err := i.RawConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return v1.ParseConfigFile(bytes.NewReader(b))
+}
+
+// ConfigName is a helper for implementing v1.Image
+func ConfigName(i WithRawConfigFile) (v1.Hash, error) {
+	b, err := i.RawConfigFile()
+	if err != nil {
+		return v1.Hash{}, err
+	}
+	h, _, err := v1.SHA256(bytes.NewReader(b))
+	return h, err
+}
+
+type configLayer struct {
+	hash    v1.Hash
+	content []byte
+}
+
+// Digest implements v1.Layer
+func (cl *configLayer) Digest() (v1.Hash, error) {
+	return cl.hash, nil
+}
+
+// DiffID implements v1.Layer
+func (cl *configLayer) DiffID() (v1.Hash, error) {
+	return cl.hash, nil
+}
+
+// Uncompressed implements v1.Layer
+func (cl *configLayer) Uncompressed() (io.ReadCloser, error) {
+	return io.NopCloser(bytes.NewBuffer(cl.content)), nil
+}
+
+// Compressed implements v1.Layer
+func (cl *configLayer) Compressed() (io.ReadCloser, error) {
+	return io.NopCloser(bytes.NewBuffer(cl.content)), nil
+}
+
+// Size implements v1.Layer
+func (cl *configLayer) Size() (int64, error) {
+	return int64(len(cl.content)), nil
+}
+
+func (cl *configLayer) MediaType() (types.MediaType, error) {
+	// Defaulting this to OCIConfigJSON as it should remain
+	// backwards compatible with DockerConfigJSON
+	return types.OCIConfigJSON, nil
+}
+
+var _ v1.Layer = (*configLayer)(nil)
+
+// withConfigLayer allows partial image implementations to provide a layer
+// for their config file.
+type withConfigLayer interface {
+	ConfigLayer() (v1.Layer, error)
+}
+
+// ConfigLayer implements v1.Layer from the raw config bytes.
+// This is so that clients (e.g. remote) can access the config as a blob.
+//
+// Images that want to return a specific layer implementation can implement
+// withConfigLayer.
+func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) {
+	if wcl, ok := unwrap(i).(withConfigLayer); ok {
+		return wcl.ConfigLayer()
+	}
+
+	h, err := ConfigName(i)
+	if err != nil {
+		return nil, err
+	}
+	rcfg, err := i.RawConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return &configLayer{
+		hash:    h,
+		content: rcfg,
+	}, nil
+}
+
+// WithConfigFile defines the subset of v1.Image used by these helper methods
+type WithConfigFile interface {
+	// ConfigFile returns this image's config file.
+	ConfigFile() (*v1.ConfigFile, error)
+}
+
+// DiffIDs is a helper for implementing v1.Image
+func DiffIDs(i WithConfigFile) ([]v1.Hash, error) {
+	cfg, err := i.ConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return cfg.RootFS.DiffIDs, nil
+}
+
+// RawConfigFile is a helper for implementing v1.Image
+func RawConfigFile(i WithConfigFile) ([]byte, error) {
+	cfg, err := i.ConfigFile()
+	if err != nil {
+		return nil, err
+	}
+	return json.Marshal(cfg)
+}
+
+// WithRawManifest defines the subset of v1.Image used by these helper methods
+type WithRawManifest interface {
+	// RawManifest returns the serialized bytes of this image's manifest.
+ RawManifest() ([]byte, error) +} + +// Digest is a helper for implementing v1.Image +func Digest(i WithRawManifest) (v1.Hash, error) { + mb, err := i.RawManifest() + if err != nil { + return v1.Hash{}, err + } + digest, _, err := v1.SHA256(bytes.NewReader(mb)) + return digest, err +} + +// Manifest is a helper for implementing v1.Image +func Manifest(i WithRawManifest) (*v1.Manifest, error) { + b, err := i.RawManifest() + if err != nil { + return nil, err + } + return v1.ParseManifest(bytes.NewReader(b)) +} + +// WithManifest defines the subset of v1.Image used by these helper methods +type WithManifest interface { + // Manifest returns this image's Manifest object. + Manifest() (*v1.Manifest, error) +} + +// RawManifest is a helper for implementing v1.Image +func RawManifest(i WithManifest) ([]byte, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +// Size is a helper for implementing v1.Image +func Size(i WithRawManifest) (int64, error) { + b, err := i.RawManifest() + if err != nil { + return -1, err + } + return int64(len(b)), nil +} + +// FSLayers is a helper for implementing v1.Image +func FSLayers(i WithManifest) ([]v1.Hash, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + fsl := make([]v1.Hash, len(m.Layers)) + for i, l := range m.Layers { + fsl[i] = l.Digest + } + return fsl, nil +} + +// BlobSize is a helper for implementing v1.Image +func BlobSize(i WithManifest, h v1.Hash) (int64, error) { + d, err := BlobDescriptor(i, h) + if err != nil { + return -1, err + } + return d.Size, nil +} + +// BlobDescriptor is a helper for implementing v1.Image +func BlobDescriptor(i WithManifest, h v1.Hash) (*v1.Descriptor, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + + if m.Config.Digest == h { + return &m.Config, nil + } + + for _, l := range m.Layers { + if l.Digest == h { + return &l, nil + } + } + return nil, fmt.Errorf("blob %v not found", h) +} + +// WithManifestAndConfigFile defines the subset of v1.Image used by these helper methods +type WithManifestAndConfigFile interface { + WithConfigFile + + // Manifest returns this image's Manifest object. + Manifest() (*v1.Manifest, error) +} + +// BlobToDiffID is a helper for mapping between compressed +// and uncompressed blob hashes. +func BlobToDiffID(i WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { + blobs, err := FSLayers(i) + if err != nil { + return v1.Hash{}, err + } + diffIDs, err := DiffIDs(i) + if err != nil { + return v1.Hash{}, err + } + if len(blobs) != len(diffIDs) { + return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) + } + for i, blob := range blobs { + if blob == h { + return diffIDs[i], nil + } + } + return v1.Hash{}, fmt.Errorf("unknown blob %v", h) +} + +// DiffIDToBlob is a helper for mapping between uncompressed +// and compressed blob hashes. +func DiffIDToBlob(wm WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { + blobs, err := FSLayers(wm) + if err != nil { + return v1.Hash{}, err + } + diffIDs, err := DiffIDs(wm) + if err != nil { + return v1.Hash{}, err + } + if len(blobs) != len(diffIDs) { + return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) + } + for i, diffID := range diffIDs { + if diffID == h { + return blobs[i], nil + } + } + return v1.Hash{}, fmt.Errorf("unknown diffID %v", h) +} + +// WithDiffID defines the subset of v1.Layer for exposing the DiffID method. 
+type WithDiffID interface { + DiffID() (v1.Hash, error) +} + +// withDescriptor allows partial layer implementations to provide a layer +// descriptor to the partial image manifest builder. This allows partial +// uncompressed layers to provide foreign layer metadata like URLs to the +// uncompressed image manifest. +type withDescriptor interface { + Descriptor() (*v1.Descriptor, error) +} + +// Describable represents something for which we can produce a v1.Descriptor. +type Describable interface { + Digest() (v1.Hash, error) + MediaType() (types.MediaType, error) + Size() (int64, error) +} + +// Descriptor returns a v1.Descriptor given a Describable. It also encodes +// some logic for unwrapping things that have been wrapped by +// CompressedToLayer, UncompressedToLayer, CompressedToImage, or +// UncompressedToImage. +func Descriptor(d Describable) (*v1.Descriptor, error) { + // If Describable implements Descriptor itself, return that. + if wd, ok := unwrap(d).(withDescriptor); ok { + return wd.Descriptor() + } + + // If all else fails, compute the descriptor from the individual methods. + var ( + desc v1.Descriptor + err error + ) + + if desc.Size, err = d.Size(); err != nil { + return nil, err + } + if desc.Digest, err = d.Digest(); err != nil { + return nil, err + } + if desc.MediaType, err = d.MediaType(); err != nil { + return nil, err + } + + return &desc, nil +} + +type withUncompressedSize interface { + UncompressedSize() (int64, error) +} + +// UncompressedSize returns the size of the Uncompressed layer. If the +// underlying implementation doesn't implement UncompressedSize directly, +// this will compute the uncompressedSize by reading everything returned +// by Compressed(). This is potentially expensive and may consume the contents +// for streaming layers. +func UncompressedSize(l v1.Layer) (int64, error) { + // If the layer implements UncompressedSize itself, return that. + if wus, ok := unwrap(l).(withUncompressedSize); ok { + return wus.UncompressedSize() + } + + // The layer doesn't implement UncompressedSize, we need to compute it. + rc, err := l.Uncompressed() + if err != nil { + return -1, err + } + defer rc.Close() + + return io.Copy(io.Discard, rc) +} + +type withExists interface { + Exists() (bool, error) +} + +// Exists checks to see if a layer exists. This is a hack to work around the +// mistakes of the partial package. Don't use this. +func Exists(l v1.Layer) (bool, error) { + // If the layer implements Exists itself, return that. + if we, ok := unwrap(l).(withExists); ok { + return we.Exists() + } + + // The layer doesn't implement Exists, so we hope that calling Compressed() + // is enough to trigger an error if the layer does not exist. + rc, err := l.Compressed() + if err != nil { + return false, err + } + defer rc.Close() + + // We may want to try actually reading a single byte, but if we need to do + // that, we should just fix this hack. + return true, nil +} + +// Recursively unwrap our wrappers so that we can check for the original implementation. +// We might want to expose this? 
+func unwrap(i any) any { + if ule, ok := i.(*uncompressedLayerExtender); ok { + return unwrap(ule.UncompressedLayer) + } + if cle, ok := i.(*compressedLayerExtender); ok { + return unwrap(cle.CompressedLayer) + } + if uie, ok := i.(*uncompressedImageExtender); ok { + return unwrap(uie.UncompressedImageCore) + } + if cie, ok := i.(*compressedImageExtender); ok { + return unwrap(cie.CompressedImageCore) + } + return i +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go new file mode 100644 index 00000000..9ee91ee2 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go @@ -0,0 +1,108 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "fmt" + "sort" + "strings" +) + +// Platform represents the target os/arch for an image. +type Platform struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` +} + +func (p Platform) String() string { + if p.OS == "" { + return "" + } + var b strings.Builder + b.WriteString(p.OS) + if p.Architecture != "" { + b.WriteString("/") + b.WriteString(p.Architecture) + } + if p.Variant != "" { + b.WriteString("/") + b.WriteString(p.Variant) + } + if p.OSVersion != "" { + b.WriteString(":") + b.WriteString(p.OSVersion) + } + return b.String() +} + +// ParsePlatform parses a string representing a Platform, if possible. +func ParsePlatform(s string) (*Platform, error) { + var p Platform + parts := strings.Split(strings.TrimSpace(s), ":") + if len(parts) == 2 { + p.OSVersion = parts[1] + } + parts = strings.Split(parts[0], "/") + if len(parts) > 0 { + p.OS = parts[0] + } + if len(parts) > 1 { + p.Architecture = parts[1] + } + if len(parts) > 2 { + p.Variant = parts[2] + } + if len(parts) > 3 { + return nil, fmt.Errorf("too many slashes in platform spec: %s", s) + } + return &p, nil +} + +// Equals returns true if the given platform is semantically equivalent to this one. +// The order of Features and OSFeatures is not important. +func (p Platform) Equals(o Platform) bool { + return p.OS == o.OS && + p.Architecture == o.Architecture && + p.Variant == o.Variant && + p.OSVersion == o.OSVersion && + stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && + stringSliceEqualIgnoreOrder(p.Features, o.Features) +} + +// stringSliceEqual compares 2 string slices and returns if their contents are identical. 
+func stringSliceEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i, elm := range a { + if elm != b[i] { + return false + } + } + return true +} + +// stringSliceEqualIgnoreOrder compares 2 string slices and returns if their contents are identical, ignoring order +func stringSliceEqualIgnoreOrder(a, b []string) bool { + if a != nil && b != nil { + sort.Strings(a) + sort.Strings(b) + } + return stringSliceEqual(a, b) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go b/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go new file mode 100644 index 00000000..844f04d9 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go @@ -0,0 +1,25 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +// Update representation of an update of transfer progress. Some functions +// in this module can take a channel to which updates will be sent while a +// transfer is in progress. +// +k8s:deepcopy-gen=false +type Update struct { + Total int64 + Complete int64 + Error error +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md new file mode 100644 index 00000000..c1e81b31 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md @@ -0,0 +1,117 @@ +# `remote` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) + +The `remote` package implements a client for accessing a registry, +per the [OCI distribution spec](https://github.com/opencontainers/distribution-spec/blob/master/spec.md). + +It leans heavily on the lower level [`transport`](/pkg/v1/remote/transport) package, which handles the +authentication handshake and structured errors. + +## Usage + +```go +package main + +import ( + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +func main() { + ref, err := name.ParseReference("gcr.io/google-containers/pause") + if err != nil { + panic(err) + } + + img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) + if err != nil { + panic(err) + } + + // do stuff with img +} +``` + +## Structure + +
+<!-- structure diagram omitted -->
+
+## Background
+
+There are a lot of confusingly similar terms that come up when talking about images in registries.
+
+### Anatomy of an image
+
+In general...
+
+* A tag refers to an image manifest.
+* An image manifest references a config file and an ordered list of _compressed_ layers by sha256 digest.
+* A config file references an ordered list of _uncompressed_ layers by sha256 digest and contains runtime configuration.
+* The sha256 digest of the config file is the [image id](https://github.com/opencontainers/image-spec/blob/master/config.md#imageid) for the image.
+
+For example, an image with two layers would look something like this:
+
+![image anatomy](/images/image-anatomy.dot.svg)
+
+### Anatomy of an index
+
+In the normal case, an [index](https://github.com/opencontainers/image-spec/blob/master/image-index.md) is used to represent a multi-platform image.
+This was the original use case for a [manifest
+list](https://docs.docker.com/registry/spec/manifest-v2-2/#manifest-list).
+
+![image index anatomy](/images/index-anatomy.dot.svg)
+
+It is possible for an index to reference another index, per the OCI
+[image-spec](https://github.com/opencontainers/image-spec/blob/master/media-types.md#compatibility-matrix).
+In theory, both an image and image index can reference arbitrary things via
+[descriptors](https://github.com/opencontainers/image-spec/blob/master/descriptor.md),
+e.g. see the [image layout
+example](https://github.com/opencontainers/image-spec/blob/master/image-layout.md#index-example),
+which references an application/xml file from an image index.
+
+That could look something like this:
+
+![strange image index anatomy](/images/index-anatomy-strange.dot.svg)
+
+Using a recursive index like this might not be possible with all registries,
+but this flexibility allows for some interesting applications, e.g. the
+[OCI Artifacts](https://github.com/opencontainers/artifacts) effort.
+
+### Anatomy of an image upload
+
+The structure of an image requires a delicate ordering when uploading an image to a registry.
+Below is a (slightly simplified) figure that describes how an image is prepared for upload
+to a registry and how the data flows between various artifacts:
+
+![upload](/images/upload.dot.svg)
+
+Note that:
+
+* A config file references the uncompressed layer contents by sha256.
+* A manifest references the compressed layer contents by sha256 and the size of the layer.
+* A manifest references the config file contents by sha256 and the size of the file.
+
+It follows that during an upload, we need to upload layers before the config file,
+and we need to upload the config file before the manifest.
+
+Sometimes we know all of this information ahead of time (e.g. when copying from remote.Image),
+so the ordering is less important.
+
+In other cases, e.g. when using a [`stream.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream#Layer),
+we can't compute anything until we have already uploaded the layer, so we need to be careful about ordering.
+
+## Caveats
+
+### schema 1
+
+This package does not support schema 1 images (see [`#377`](https://github.com/google/go-containerregistry/issues/377));
+however, it's possible to do _something_ useful with them via [`remote.Get`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote#Get),
+which doesn't try to interpret what is returned by the registry.
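+
+For illustration, here is a minimal sketch of branching on the media type that
+`remote.Get` returns, so that schema 1 manifests are handled without calling
+`Image()` on them (the `example.com/repo:tag` reference is a placeholder):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+func main() {
+	// Placeholder reference; substitute a real image.
+	ref, err := name.ParseReference("example.com/repo:tag")
+	if err != nil {
+		panic(err)
+	}
+
+	// Get leaves the registry response un-interpreted, so it succeeds
+	// even for media types that Image()/ImageIndex() would reject.
+	desc, err := remote.Get(ref)
+	if err != nil {
+		panic(err)
+	}
+
+	switch desc.MediaType {
+	case types.DockerManifestSchema1, types.DockerManifestSchema1Signed:
+		// Schema 1: the raw manifest bytes are still available.
+		fmt.Printf("schema 1 manifest: %d bytes\n", len(desc.Manifest))
+	default:
+		// Anything else can be interpreted as an image (or index).
+		img, err := desc.Image()
+		if err != nil {
+			panic(err)
+		}
+		digest, err := img.Digest()
+		if err != nil {
+			panic(err)
+		}
+		fmt.Println(digest)
+	}
+}
+```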
+ +[`crane.Copy`](https://godoc.org/github.com/google/go-containerregistry/pkg/crane#Copy) takes advantage of this to implement support for copying schema 1 images, +see [here](https://github.com/google/go-containerregistry/blob/main/pkg/internal/legacy/copy.go). diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go new file mode 100644 index 00000000..eb4306f2 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go @@ -0,0 +1,154 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +type catalog struct { + Repos []string `json:"repositories"` +} + +// CatalogPage calls /_catalog, returning the list of repositories on the registry. +func CatalogPage(target name.Registry, last string, n int, options ...Option) ([]string, error) { + o, err := makeOptions(target, options...) + if err != nil { + return nil, err + } + + scopes := []string{target.Scope(transport.PullScope)} + tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes) + if err != nil { + return nil, err + } + + query := fmt.Sprintf("last=%s&n=%d", url.QueryEscape(last), n) + + uri := url.URL{ + Scheme: target.Scheme(), + Host: target.RegistryStr(), + Path: "/v2/_catalog", + RawQuery: query, + } + + client := http.Client{Transport: tr} + req, err := http.NewRequest(http.MethodGet, uri.String(), nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req.WithContext(o.context)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + var parsed catalog + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { + return nil, err + } + + return parsed.Repos, nil +} + +// Catalog calls /_catalog, returning the list of repositories on the registry. +func Catalog(ctx context.Context, target name.Registry, options ...Option) ([]string, error) { + o, err := makeOptions(target, options...) + if err != nil { + return nil, err + } + + scopes := []string{target.Scope(transport.PullScope)} + tr, err := transport.NewWithContext(o.context, target, o.auth, o.transport, scopes) + if err != nil { + return nil, err + } + + uri := &url.URL{ + Scheme: target.Scheme(), + Host: target.RegistryStr(), + Path: "/v2/_catalog", + } + + if o.pageSize > 0 { + uri.RawQuery = fmt.Sprintf("n=%d", o.pageSize) + } + + client := http.Client{Transport: tr} + + // WithContext overrides the ctx passed directly. 
+ if o.context != context.Background() { + ctx = o.context + } + + var ( + parsed catalog + repoList []string + ) + + // get responses until there is no next page + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + req, err := http.NewRequest("GET", uri.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { + return nil, err + } + if err := resp.Body.Close(); err != nil { + return nil, err + } + + repoList = append(repoList, parsed.Repos...) + + uri, err = getNextPageURL(resp) + if err != nil { + return nil, err + } + // no next page + if uri == nil { + break + } + } + return repoList, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go new file mode 100644 index 00000000..b4395c23 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go @@ -0,0 +1,72 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "fmt" + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +// CheckPushPermission returns an error if the given keychain cannot authorize +// a push operation to the given ref. +// +// This can be useful to check whether the caller has permission to push an +// image before doing work to construct the image. +// +// TODO(#412): Remove the need for this method. +func CheckPushPermission(ref name.Reference, kc authn.Keychain, t http.RoundTripper) error { + auth, err := kc.Resolve(ref.Context().Registry) + if err != nil { + return fmt.Errorf("resolving authorization for %v failed: %w", ref.Context().Registry, err) + } + + scopes := []string{ref.Scope(transport.PushScope)} + tr, err := transport.NewWithContext(context.TODO(), ref.Context().Registry, auth, t, scopes) + if err != nil { + return fmt.Errorf("creating push check transport for %v failed: %w", ref.Context().Registry, err) + } + // TODO(jasonhall): Against GCR, just doing the token handshake is + // enough, but this doesn't extend to Dockerhub + // (https://github.com/docker/hub-feedback/issues/1771), so we actually + // need to initiate an upload to tell whether the credentials can + // authorize a push. Figure out how to return early here when we can, + // to avoid a roundtrip for spec-compliant registries. 
+ w := writer{ + repo: ref.Context(), + client: &http.Client{Transport: tr}, + } + loc, _, err := w.initiateUpload(context.Background(), "", "", "") + if loc != "" { + // Since we're only initiating the upload to check whether we + // can, we should attempt to cancel it, in case initiating + // reserves some resources on the server. We shouldn't wait for + // cancelling to complete, and we don't care if it fails. + go w.cancelUpload(loc) + } + return err +} + +func (w *writer) cancelUpload(loc string) { + req, err := http.NewRequest(http.MethodDelete, loc, nil) + if err != nil { + return + } + _, _ = w.client.Do(req) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go new file mode 100644 index 00000000..3b902271 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go @@ -0,0 +1,57 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "fmt" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +// Delete removes the specified image reference from the remote registry. +func Delete(ref name.Reference, options ...Option) error { + o, err := makeOptions(ref.Context(), options...) + if err != nil { + return err + } + scopes := []string{ref.Scope(transport.DeleteScope)} + tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) + if err != nil { + return err + } + c := &http.Client{Transport: tr} + + u := url.URL{ + Scheme: ref.Context().Registry.Scheme(), + Host: ref.Context().RegistryStr(), + Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()), + } + + req, err := http.NewRequest(http.MethodDelete, u.String(), nil) + if err != nil { + return err + } + + resp, err := c.Do(req.WithContext(o.context)) + if err != nil { + return err + } + defer resp.Body.Close() + + return transport.CheckError(resp, http.StatusOK, http.StatusAccepted) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go new file mode 100644 index 00000000..9c8ae168 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go @@ -0,0 +1,430 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/google/go-containerregistry/internal/redact"
+	"github.com/google/go-containerregistry/internal/verify"
+	"github.com/google/go-containerregistry/pkg/logs"
+	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// ErrSchema1 indicates that we received a schema1 manifest from the registry.
+// This library doesn't have plans to support this legacy image format:
+// https://github.com/google/go-containerregistry/issues/377
+type ErrSchema1 struct {
+	schema string
+}
+
+// newErrSchema1 returns an ErrSchema1 with the unexpected MediaType.
+func newErrSchema1(schema types.MediaType) error {
+	return &ErrSchema1{
+		schema: string(schema),
+	}
+}
+
+// Error implements error.
+func (e *ErrSchema1) Error() string {
+	return fmt.Sprintf("unsupported MediaType: %q, see https://github.com/google/go-containerregistry/issues/377", e.schema)
+}
+
+// Descriptor provides access to metadata about a remote artifact and accessors
+// for efficiently converting it into a v1.Image or v1.ImageIndex.
+type Descriptor struct {
+	fetcher
+	v1.Descriptor
+	Manifest []byte
+
+	// So we can share this implementation with Image.
+	platform v1.Platform
+}
+
+// RawManifest exists to satisfy the Taggable interface.
+func (d *Descriptor) RawManifest() ([]byte, error) {
+	return d.Manifest, nil
+}
+
+// Get returns a remote.Descriptor for the given reference. The response from
+// the registry is left un-interpreted, for the most part. This is useful for
+// querying what kind of artifact a reference represents.
+//
+// See Head if you don't need the response body.
+func Get(ref name.Reference, options ...Option) (*Descriptor, error) {
+	acceptable := []types.MediaType{
+		// Just to look at them.
+		types.DockerManifestSchema1,
+		types.DockerManifestSchema1Signed,
+	}
+	acceptable = append(acceptable, acceptableImageMediaTypes...)
+	acceptable = append(acceptable, acceptableIndexMediaTypes...)
+	return get(ref, acceptable, options...)
+}
+
+// Head returns a v1.Descriptor for the given reference by issuing a HEAD
+// request.
+//
+// Note that the server response will not have a body, so any errors encountered
+// should be retried with Get to get more details.
+func Head(ref name.Reference, options ...Option) (*v1.Descriptor, error) {
+	acceptable := []types.MediaType{
+		// Just to look at them.
+		types.DockerManifestSchema1,
+		types.DockerManifestSchema1Signed,
+	}
+	acceptable = append(acceptable, acceptableImageMediaTypes...)
+	acceptable = append(acceptable, acceptableIndexMediaTypes...)
+
+	o, err := makeOptions(ref.Context(), options...)
+	if err != nil {
+		return nil, err
+	}
+
+	f, err := makeFetcher(ref, o)
+	if err != nil {
+		return nil, err
+	}
+
+	return f.headManifest(ref, acceptable)
+}
+
+// Handle options and fetch the manifest with the acceptable MediaTypes in the
+// Accept header.
+func get(ref name.Reference, acceptable []types.MediaType, options ...Option) (*Descriptor, error) {
+	o, err := makeOptions(ref.Context(), options...)
+ if err != nil { + return nil, err + } + f, err := makeFetcher(ref, o) + if err != nil { + return nil, err + } + b, desc, err := f.fetchManifest(ref, acceptable) + if err != nil { + return nil, err + } + return &Descriptor{ + fetcher: *f, + Manifest: b, + Descriptor: *desc, + platform: o.platform, + }, nil +} + +// Image converts the Descriptor into a v1.Image. +// +// If the fetched artifact is already an image, it will just return it. +// +// If the fetched artifact is an index, it will attempt to resolve the index to +// a child image with the appropriate platform. +// +// See WithPlatform to set the desired platform. +func (d *Descriptor) Image() (v1.Image, error) { + switch d.MediaType { + case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: + // We don't care to support schema 1 images: + // https://github.com/google/go-containerregistry/issues/377 + return nil, newErrSchema1(d.MediaType) + case types.OCIImageIndex, types.DockerManifestList: + // We want an image but the registry has an index, resolve it to an image. + return d.remoteIndex().imageByPlatform(d.platform) + case types.OCIManifestSchema1, types.DockerManifestSchema2: + // These are expected. Enumerated here to allow a default case. + default: + // We could just return an error here, but some registries (e.g. static + // registries) don't set the Content-Type headers correctly, so instead... + logs.Warn.Printf("Unexpected media type for Image(): %s", d.MediaType) + } + + // Wrap the v1.Layers returned by this v1.Image in a hint for downstream + // remote.Write calls to facilitate cross-repo "mounting". + imgCore, err := partial.CompressedToImage(d.remoteImage()) + if err != nil { + return nil, err + } + return &mountableImage{ + Image: imgCore, + Reference: d.Ref, + }, nil +} + +// ImageIndex converts the Descriptor into a v1.ImageIndex. +func (d *Descriptor) ImageIndex() (v1.ImageIndex, error) { + switch d.MediaType { + case types.DockerManifestSchema1, types.DockerManifestSchema1Signed: + // We don't care to support schema 1 images: + // https://github.com/google/go-containerregistry/issues/377 + return nil, newErrSchema1(d.MediaType) + case types.OCIManifestSchema1, types.DockerManifestSchema2: + // We want an index but the registry has an image, nothing we can do. + return nil, fmt.Errorf("unexpected media type for ImageIndex(): %s; call Image() instead", d.MediaType) + case types.OCIImageIndex, types.DockerManifestList: + // These are expected. + default: + // We could just return an error here, but some registries (e.g. static + // registries) don't set the Content-Type headers correctly, so instead... + logs.Warn.Printf("Unexpected media type for ImageIndex(): %s", d.MediaType) + } + return d.remoteIndex(), nil +} + +func (d *Descriptor) remoteImage() *remoteImage { + return &remoteImage{ + fetcher: d.fetcher, + manifest: d.Manifest, + mediaType: d.MediaType, + descriptor: &d.Descriptor, + } +} + +func (d *Descriptor) remoteIndex() *remoteIndex { + return &remoteIndex{ + fetcher: d.fetcher, + manifest: d.Manifest, + mediaType: d.MediaType, + descriptor: &d.Descriptor, + } +} + +// fetcher implements methods for reading from a registry. 
+type fetcher struct {
+	Ref     name.Reference
+	Client  *http.Client
+	context context.Context
+}
+
+func makeFetcher(ref name.Reference, o *options) (*fetcher, error) {
+	tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, []string{ref.Scope(transport.PullScope)})
+	if err != nil {
+		return nil, err
+	}
+	return &fetcher{
+		Ref:     ref,
+		Client:  &http.Client{Transport: tr},
+		context: o.context,
+	}, nil
+}
+
+// url returns a url.URL for the specified path in the context of this remote image reference.
+func (f *fetcher) url(resource, identifier string) url.URL {
+	return url.URL{
+		Scheme: f.Ref.Context().Registry.Scheme(),
+		Host:   f.Ref.Context().RegistryStr(),
+		Path:   fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier),
+	}
+}
+
+func (f *fetcher) fetchManifest(ref name.Reference, acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) {
+	u := f.url("manifests", ref.Identifier())
+	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	accept := []string{}
+	for _, mt := range acceptable {
+		accept = append(accept, string(mt))
+	}
+	req.Header.Set("Accept", strings.Join(accept, ","))
+
+	resp, err := f.Client.Do(req.WithContext(f.context))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	if err := transport.CheckError(resp, http.StatusOK); err != nil {
+		return nil, nil, err
+	}
+
+	manifest, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	digest, size, err := v1.SHA256(bytes.NewReader(manifest))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	mediaType := types.MediaType(resp.Header.Get("Content-Type"))
+	contentDigest, err := v1.NewHash(resp.Header.Get("Docker-Content-Digest"))
+	if err == nil && mediaType == types.DockerManifestSchema1Signed {
+		// If we can parse the digest from the header, and it's a signed schema 1
+		// manifest, let's use that for the digest to appease older registries.
+		digest = contentDigest
+	}
+
+	// Validate the digest matches what we asked for, if pulling by digest.
+	if dgst, ok := ref.(name.Digest); ok {
+		if digest.String() != dgst.DigestStr() {
+			return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
+		}
+	}
+	// Do nothing for tags; I give up.
+	//
+	// We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry,
+	// but so many registries implement this incorrectly that it's not worth checking.
+	//
+	// For reference:
+	// https://github.com/GoogleContainerTools/kaniko/issues/298
+
+	// Return all this info since we have to calculate it anyway.
+	desc := v1.Descriptor{
+		Digest:    digest,
+		Size:      size,
+		MediaType: mediaType,
+	}
+
+	return manifest, &desc, nil
+}
+
+func (f *fetcher) headManifest(ref name.Reference, acceptable []types.MediaType) (*v1.Descriptor, error) {
+	u := f.url("manifests", ref.Identifier())
+	req, err := http.NewRequest(http.MethodHead, u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	accept := []string{}
+	for _, mt := range acceptable {
+		accept = append(accept, string(mt))
+	}
+	req.Header.Set("Accept", strings.Join(accept, ","))
+
+	resp, err := f.Client.Do(req.WithContext(f.context))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if err := transport.CheckError(resp, http.StatusOK); err != nil {
+		return nil, err
+	}
+
+	mth := resp.Header.Get("Content-Type")
+	if mth == "" {
+		return nil, fmt.Errorf("HEAD %s: response did not include Content-Type header", u.String())
+	}
+	mediaType := types.MediaType(mth)
+
+	size := resp.ContentLength
+	if size == -1 {
+		return nil, fmt.Errorf("HEAD %s: response did not include Content-Length header", u.String())
+	}
+
+	dh := resp.Header.Get("Docker-Content-Digest")
+	if dh == "" {
+		return nil, fmt.Errorf("HEAD %s: response did not include Docker-Content-Digest header", u.String())
+	}
+	digest, err := v1.NewHash(dh)
+	if err != nil {
+		return nil, err
+	}
+
+	// Validate the digest matches what we asked for, if pulling by digest.
+	if dgst, ok := ref.(name.Digest); ok {
+		if digest.String() != dgst.DigestStr() {
+			return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref)
+		}
+	}
+
+	// Return all this info since we have to calculate it anyway.
+	return &v1.Descriptor{
+		Digest:    digest,
+		Size:      size,
+		MediaType: mediaType,
+	}, nil
+}
+
+func (f *fetcher) fetchBlob(ctx context.Context, size int64, h v1.Hash) (io.ReadCloser, error) {
+	u := f.url("blobs", h.String())
+	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := f.Client.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, redact.Error(err)
+	}
+
+	if err := transport.CheckError(resp, http.StatusOK); err != nil {
+		resp.Body.Close()
+		return nil, err
+	}
+
+	// Do whatever we can.
+	// If we have an expected size and Content-Length doesn't match, return an error.
+	// If we don't have an expected size and we do have a Content-Length, use Content-Length.
+ if hsize := resp.ContentLength; hsize != -1 { + if size == verify.SizeUnknown { + size = hsize + } else if hsize != size { + return nil, fmt.Errorf("GET %s: Content-Length header %d does not match expected size %d", u.String(), hsize, size) + } + } + + return verify.ReadCloser(resp.Body, size, h) +} + +func (f *fetcher) headBlob(h v1.Hash) (*http.Response, error) { + u := f.url("blobs", h.String()) + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return nil, err + } + + resp, err := f.Client.Do(req.WithContext(f.context)) + if err != nil { + return nil, redact.Error(err) + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + resp.Body.Close() + return nil, err + } + + return resp, nil +} + +func (f *fetcher) blobExists(h v1.Hash) (bool, error) { + u := f.url("blobs", h.String()) + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return false, err + } + + resp, err := f.Client.Do(req.WithContext(f.context)) + if err != nil { + return false, redact.Error(err) + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + return false, err + } + + return resp.StatusCode == http.StatusOK, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go new file mode 100644 index 00000000..846ba07c --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package remote provides facilities for reading/writing v1.Images from/to +// a remote image registry. +package remote diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go new file mode 100644 index 00000000..4e17de76 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go @@ -0,0 +1,247 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package remote + +import ( + "bytes" + "io" + "net/http" + "net/url" + "sync" + + "github.com/google/go-containerregistry/internal/redact" + "github.com/google/go-containerregistry/internal/verify" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var acceptableImageMediaTypes = []types.MediaType{ + types.DockerManifestSchema2, + types.OCIManifestSchema1, +} + +// remoteImage accesses an image from a remote registry +type remoteImage struct { + fetcher + manifestLock sync.Mutex // Protects manifest + manifest []byte + configLock sync.Mutex // Protects config + config []byte + mediaType types.MediaType + descriptor *v1.Descriptor +} + +var _ partial.CompressedImageCore = (*remoteImage)(nil) + +// Image provides access to a remote image reference. +func Image(ref name.Reference, options ...Option) (v1.Image, error) { + desc, err := Get(ref, options...) + if err != nil { + return nil, err + } + + return desc.Image() +} + +func (r *remoteImage) MediaType() (types.MediaType, error) { + if string(r.mediaType) != "" { + return r.mediaType, nil + } + return types.DockerManifestSchema2, nil +} + +func (r *remoteImage) RawManifest() ([]byte, error) { + r.manifestLock.Lock() + defer r.manifestLock.Unlock() + if r.manifest != nil { + return r.manifest, nil + } + + // NOTE(jonjohnsonjr): We should never get here because the public entrypoints + // do type-checking via remote.Descriptor. I've left this here for tests that + // directly instantiate a remoteImage. + manifest, desc, err := r.fetchManifest(r.Ref, acceptableImageMediaTypes) + if err != nil { + return nil, err + } + + if r.descriptor == nil { + r.descriptor = desc + } + r.mediaType = desc.MediaType + r.manifest = manifest + return r.manifest, nil +} + +func (r *remoteImage) RawConfigFile() ([]byte, error) { + r.configLock.Lock() + defer r.configLock.Unlock() + if r.config != nil { + return r.config, nil + } + + m, err := partial.Manifest(r) + if err != nil { + return nil, err + } + + if m.Config.Data != nil { + if err := verify.Descriptor(m.Config); err != nil { + return nil, err + } + r.config = m.Config.Data + return r.config, nil + } + + body, err := r.fetchBlob(r.context, m.Config.Size, m.Config.Digest) + if err != nil { + return nil, err + } + defer body.Close() + + r.config, err = io.ReadAll(body) + if err != nil { + return nil, err + } + return r.config, nil +} + +// Descriptor retains the original descriptor from an index manifest. +// See partial.Descriptor. +func (r *remoteImage) Descriptor() (*v1.Descriptor, error) { + // kind of a hack, but RawManifest does appropriate locking/memoization + // and makes sure r.descriptor is populated. + _, err := r.RawManifest() + return r.descriptor, err +} + +// remoteImageLayer implements partial.CompressedLayer +type remoteImageLayer struct { + ri *remoteImage + digest v1.Hash +} + +// Digest implements partial.CompressedLayer +func (rl *remoteImageLayer) Digest() (v1.Hash, error) { + return rl.digest, nil +} + +// Compressed implements partial.CompressedLayer +func (rl *remoteImageLayer) Compressed() (io.ReadCloser, error) { + urls := []url.URL{rl.ri.url("blobs", rl.digest.String())} + + // Add alternative layer sources from URLs (usually none). 
+ d, err := partial.BlobDescriptor(rl, rl.digest) + if err != nil { + return nil, err + } + + if d.Data != nil { + return verify.ReadCloser(io.NopCloser(bytes.NewReader(d.Data)), d.Size, d.Digest) + } + + // We don't want to log binary layers -- this can break terminals. + ctx := redact.NewContext(rl.ri.context, "omitting binary blobs from logs") + + for _, s := range d.URLs { + u, err := url.Parse(s) + if err != nil { + return nil, err + } + urls = append(urls, *u) + } + + // The lastErr for most pulls will be the same (the first error), but for + // foreign layers we'll want to surface the last one, since we try to pull + // from the registry first, which would often fail. + // TODO: Maybe we don't want to try pulling from the registry first? + var lastErr error + for _, u := range urls { + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + resp, err := rl.ri.Client.Do(req.WithContext(ctx)) + if err != nil { + lastErr = err + continue + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + resp.Body.Close() + lastErr = err + continue + } + + return verify.ReadCloser(resp.Body, d.Size, rl.digest) + } + + return nil, lastErr +} + +// Manifest implements partial.WithManifest so that we can use partial.BlobSize below. +func (rl *remoteImageLayer) Manifest() (*v1.Manifest, error) { + return partial.Manifest(rl.ri) +} + +// MediaType implements v1.Layer +func (rl *remoteImageLayer) MediaType() (types.MediaType, error) { + bd, err := partial.BlobDescriptor(rl, rl.digest) + if err != nil { + return "", err + } + + return bd.MediaType, nil +} + +// Size implements partial.CompressedLayer +func (rl *remoteImageLayer) Size() (int64, error) { + // Look up the size of this digest in the manifest to avoid a request. + return partial.BlobSize(rl, rl.digest) +} + +// ConfigFile implements partial.WithManifestAndConfigFile so that we can use partial.BlobToDiffID below. +func (rl *remoteImageLayer) ConfigFile() (*v1.ConfigFile, error) { + return partial.ConfigFile(rl.ri) +} + +// DiffID implements partial.WithDiffID so that we don't recompute a DiffID that we already have +// available in our ConfigFile. +func (rl *remoteImageLayer) DiffID() (v1.Hash, error) { + return partial.BlobToDiffID(rl, rl.digest) +} + +// Descriptor retains the original descriptor from an image manifest. +// See partial.Descriptor. +func (rl *remoteImageLayer) Descriptor() (*v1.Descriptor, error) { + return partial.BlobDescriptor(rl, rl.digest) +} + +// See partial.Exists. +func (rl *remoteImageLayer) Exists() (bool, error) { + return rl.ri.blobExists(rl.digest) +} + +// LayerByDigest implements partial.CompressedLayer +func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + return &remoteImageLayer{ + ri: r, + digest: h, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go new file mode 100644 index 00000000..a4ee74c0 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go @@ -0,0 +1,309 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "fmt" + "sync" + + "github.com/google/go-containerregistry/internal/verify" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var acceptableIndexMediaTypes = []types.MediaType{ + types.DockerManifestList, + types.OCIImageIndex, +} + +// remoteIndex accesses an index from a remote registry +type remoteIndex struct { + fetcher + manifestLock sync.Mutex // Protects manifest + manifest []byte + mediaType types.MediaType + descriptor *v1.Descriptor +} + +// Index provides access to a remote index reference. +func Index(ref name.Reference, options ...Option) (v1.ImageIndex, error) { + desc, err := get(ref, acceptableIndexMediaTypes, options...) + if err != nil { + return nil, err + } + + return desc.ImageIndex() +} + +func (r *remoteIndex) MediaType() (types.MediaType, error) { + if string(r.mediaType) != "" { + return r.mediaType, nil + } + return types.DockerManifestList, nil +} + +func (r *remoteIndex) Digest() (v1.Hash, error) { + return partial.Digest(r) +} + +func (r *remoteIndex) Size() (int64, error) { + return partial.Size(r) +} + +func (r *remoteIndex) RawManifest() ([]byte, error) { + r.manifestLock.Lock() + defer r.manifestLock.Unlock() + if r.manifest != nil { + return r.manifest, nil + } + + // NOTE(jonjohnsonjr): We should never get here because the public entrypoints + // do type-checking via remote.Descriptor. I've left this here for tests that + // directly instantiate a remoteIndex. + manifest, desc, err := r.fetchManifest(r.Ref, acceptableIndexMediaTypes) + if err != nil { + return nil, err + } + + if r.descriptor == nil { + r.descriptor = desc + } + r.mediaType = desc.MediaType + r.manifest = manifest + return r.manifest, nil +} + +func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) { + b, err := r.RawManifest() + if err != nil { + return nil, err + } + return v1.ParseIndexManifest(bytes.NewReader(b)) +} + +func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) { + desc, err := r.childByHash(h) + if err != nil { + return nil, err + } + + // Descriptor.Image will handle coercing nested indexes into an Image. + return desc.Image() +} + +// Descriptor retains the original descriptor from an index manifest. +// See partial.Descriptor. +func (r *remoteIndex) Descriptor() (*v1.Descriptor, error) { + // kind of a hack, but RawManifest does appropriate locking/memoization + // and makes sure r.descriptor is populated. + _, err := r.RawManifest() + return r.descriptor, err +} + +func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + desc, err := r.childByHash(h) + if err != nil { + return nil, err + } + return desc.ImageIndex() +} + +// Workaround for #819. 
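+// (Some indexes reference children whose media type is neither an image nor a
+// sub-index; exposing them as layers lets callers still read those blobs.)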
+func (r *remoteIndex) Layer(h v1.Hash) (v1.Layer, error) { + index, err := r.IndexManifest() + if err != nil { + return nil, err + } + for _, childDesc := range index.Manifests { + if h == childDesc.Digest { + l, err := partial.CompressedToLayer(&remoteLayer{ + fetcher: r.fetcher, + digest: h, + }) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: r.Ref.Context().Digest(h.String()), + }, nil + } + } + return nil, fmt.Errorf("layer not found: %s", h) +} + +// Experiment with a better API for v1.ImageIndex. We might want to move this +// to partial? +func (r *remoteIndex) Manifests() ([]partial.Describable, error) { + m, err := r.IndexManifest() + if err != nil { + return nil, err + } + manifests := []partial.Describable{} + for _, desc := range m.Manifests { + switch { + case desc.MediaType.IsImage(): + img, err := r.Image(desc.Digest) + if err != nil { + return nil, err + } + manifests = append(manifests, img) + case desc.MediaType.IsIndex(): + idx, err := r.ImageIndex(desc.Digest) + if err != nil { + return nil, err + } + manifests = append(manifests, idx) + default: + layer, err := r.Layer(desc.Digest) + if err != nil { + return nil, err + } + manifests = append(manifests, layer) + } + } + + return manifests, nil +} + +func (r *remoteIndex) imageByPlatform(platform v1.Platform) (v1.Image, error) { + desc, err := r.childByPlatform(platform) + if err != nil { + return nil, err + } + + // Descriptor.Image will handle coercing nested indexes into an Image. + return desc.Image() +} + +// This naively matches the first manifest with matching platform attributes. +// +// We should probably use this instead: +// +// github.com/containerd/containerd/platforms +// +// But first we'd need to migrate to: +// +// github.com/opencontainers/image-spec/specs-go/v1 +func (r *remoteIndex) childByPlatform(platform v1.Platform) (*Descriptor, error) { + index, err := r.IndexManifest() + if err != nil { + return nil, err + } + for _, childDesc := range index.Manifests { + // If platform is missing from child descriptor, assume it's amd64/linux. + p := defaultPlatform + if childDesc.Platform != nil { + p = *childDesc.Platform + } + + if matchesPlatform(p, platform) { + return r.childDescriptor(childDesc, platform) + } + } + return nil, fmt.Errorf("no child with platform %+v in index %s", platform, r.Ref) +} + +func (r *remoteIndex) childByHash(h v1.Hash) (*Descriptor, error) { + index, err := r.IndexManifest() + if err != nil { + return nil, err + } + for _, childDesc := range index.Manifests { + if h == childDesc.Digest { + return r.childDescriptor(childDesc, defaultPlatform) + } + } + return nil, fmt.Errorf("no child with digest %s in index %s", h, r.Ref) +} + +// Convert one of this index's child's v1.Descriptor into a remote.Descriptor, with the given platform option. 
+func (r *remoteIndex) childDescriptor(child v1.Descriptor, platform v1.Platform) (*Descriptor, error) { + ref := r.Ref.Context().Digest(child.Digest.String()) + var ( + manifest []byte + err error + ) + if child.Data != nil { + if err := verify.Descriptor(child); err != nil { + return nil, err + } + manifest = child.Data + } else { + manifest, _, err = r.fetchManifest(ref, []types.MediaType{child.MediaType}) + if err != nil { + return nil, err + } + } + return &Descriptor{ + fetcher: fetcher{ + Ref: ref, + Client: r.Client, + context: r.context, + }, + Manifest: manifest, + Descriptor: child, + platform: platform, + }, nil +} + +// matchesPlatform checks if the given platform matches the required platforms. +// The given platform matches the required platform if +// - architecture and OS are identical. +// - OS version and variant are identical if provided. +// - features and OS features of the required platform are subsets of those of the given platform. +func matchesPlatform(given, required v1.Platform) bool { + // Required fields that must be identical. + if given.Architecture != required.Architecture || given.OS != required.OS { + return false + } + + // Optional fields that may be empty, but must be identical if provided. + if required.OSVersion != "" && given.OSVersion != required.OSVersion { + return false + } + if required.Variant != "" && given.Variant != required.Variant { + return false + } + + // Verify required platform's features are a subset of given platform's features. + if !isSubset(given.OSFeatures, required.OSFeatures) { + return false + } + if !isSubset(given.Features, required.Features) { + return false + } + + return true +} + +// isSubset checks if the required array of strings is a subset of the given lst. +func isSubset(lst, required []string) bool { + set := make(map[string]bool) + for _, value := range lst { + set[value] = true + } + + for _, value := range required { + if _, ok := set[value]; !ok { + return false + } + } + + return true +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go new file mode 100644 index 00000000..b2126f59 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go @@ -0,0 +1,94 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
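matchesPlatform and childByPlatform are what make remote.WithPlatform work when a reference resolves to an index. A sketch of selecting a single image from a multi-platform tag through the public API; the reference below is only an example and is assumed to still be published as a manifest list:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	ref, err := name.ParseReference("gcr.io/google-containers/pause:3.2")
	if err != nil {
		panic(err)
	}

	// Ask the index for its linux/arm64 child; childByPlatform matches
	// this against each child descriptor's platform field.
	img, err := remote.Image(ref, remote.WithPlatform(v1.Platform{
		OS:           "linux",
		Architecture: "arm64",
	}))
	if err != nil {
		panic(err)
	}

	cfg, err := img.ConfigFile()
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.OS, cfg.Architecture)
}
```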
+
+package remote
+
+import (
+	"io"
+
+	"github.com/google/go-containerregistry/internal/redact"
+	"github.com/google/go-containerregistry/internal/verify"
+	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+)
+
+// remoteLayer implements partial.CompressedLayer
+type remoteLayer struct {
+	fetcher
+	digest v1.Hash
+}
+
+// Compressed implements partial.CompressedLayer
+func (rl *remoteLayer) Compressed() (io.ReadCloser, error) {
+	// We don't want to log binary layers -- this can break terminals.
+	ctx := redact.NewContext(rl.context, "omitting binary blobs from logs")
+	return rl.fetchBlob(ctx, verify.SizeUnknown, rl.digest)
+}
+
+// Size implements partial.CompressedLayer
+func (rl *remoteLayer) Size() (int64, error) {
+	resp, err := rl.headBlob(rl.digest)
+	if err != nil {
+		return -1, err
+	}
+	defer resp.Body.Close()
+	return resp.ContentLength, nil
+}
+
+// Digest implements partial.CompressedLayer
+func (rl *remoteLayer) Digest() (v1.Hash, error) {
+	return rl.digest, nil
+}
+
+// MediaType implements v1.Layer
+func (rl *remoteLayer) MediaType() (types.MediaType, error) {
+	return types.DockerLayer, nil
+}
+
+// See partial.Exists.
+func (rl *remoteLayer) Exists() (bool, error) {
+	return rl.blobExists(rl.digest)
+}
+
+// Layer reads the given blob reference from a registry as a Layer. A blob
+// reference here is just a punned name.Digest where the digest portion is the
+// digest of the blob to be read and the repository portion is the repo where
+// that blob lives.
+func Layer(ref name.Digest, options ...Option) (v1.Layer, error) {
+	o, err := makeOptions(ref.Context(), options...)
+	if err != nil {
+		return nil, err
+	}
+	f, err := makeFetcher(ref, o)
+	if err != nil {
+		return nil, err
+	}
+	h, err := v1.NewHash(ref.Identifier())
+	if err != nil {
+		return nil, err
+	}
+	l, err := partial.CompressedToLayer(&remoteLayer{
+		fetcher: *f,
+		digest:  h,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &MountableLayer{
+		Layer:     l,
+		Reference: ref,
+	}, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go
new file mode 100644
index 00000000..e643c49a
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go
@@ -0,0 +1,141 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
+)
+
+type tags struct {
+	Name string   `json:"name"`
+	Tags []string `json:"tags"`
+}
+
+// ListWithContext calls List with the given context.
+//
+// Deprecated: Use List and WithContext.
This will be removed in a future release. +func ListWithContext(ctx context.Context, repo name.Repository, options ...Option) ([]string, error) { + return List(repo, append(options, WithContext(ctx))...) +} + +// List calls /tags/list for the given repository, returning the list of tags +// in the "tags" property. +func List(repo name.Repository, options ...Option) ([]string, error) { + o, err := makeOptions(repo, options...) + if err != nil { + return nil, err + } + scopes := []string{repo.Scope(transport.PullScope)} + tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) + if err != nil { + return nil, err + } + + uri := &url.URL{ + Scheme: repo.Registry.Scheme(), + Host: repo.Registry.RegistryStr(), + Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()), + } + + if o.pageSize > 0 { + uri.RawQuery = fmt.Sprintf("n=%d", o.pageSize) + } + + client := http.Client{Transport: tr} + tagList := []string{} + parsed := tags{} + + // get responses until there is no next page + for { + select { + case <-o.context.Done(): + return nil, o.context.Err() + default: + } + + req, err := http.NewRequestWithContext(o.context, "GET", uri.String(), nil) + if err != nil { + return nil, err + } + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, err + } + + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { + return nil, err + } + + if err := resp.Body.Close(); err != nil { + return nil, err + } + + tagList = append(tagList, parsed.Tags...) + + uri, err = getNextPageURL(resp) + if err != nil { + return nil, err + } + // no next page + if uri == nil { + break + } + } + + return tagList, nil +} + +// getNextPageURL checks if there is a Link header in a http.Response which +// contains a link to the next page. If yes it returns the url.URL of the next +// page otherwise it returns nil. +func getNextPageURL(resp *http.Response) (*url.URL, error) { + link := resp.Header.Get("Link") + if link == "" { + return nil, nil + } + + if link[0] != '<' { + return nil, fmt.Errorf("failed to parse link header: missing '<' in: %s", link) + } + + end := strings.Index(link, ">") + if end == -1 { + return nil, fmt.Errorf("failed to parse link header: missing '>' in: %s", link) + } + link = link[1:end] + + linkURL, err := url.Parse(link) + if err != nil { + return nil, err + } + if resp.Request == nil || resp.Request.URL == nil { + return nil, nil + } + linkURL = resp.Request.URL.ResolveReference(linkURL) + return linkURL, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go new file mode 100644 index 00000000..36d08856 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go @@ -0,0 +1,108 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
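List (above) keeps requesting pages until getNextPageURL finds no Link header, so a single call returns every tag. A sketch of typical usage; the repository name is only an example:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	repo, err := name.NewRepository("gcr.io/google-containers/pause")
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// List follows the registry's pagination internally, so this returns
	// all tags even when the registry serves them 100 at a time.
	tags, err := remote.List(repo, remote.WithContext(ctx), remote.WithPageSize(100))
	if err != nil {
		panic(err)
	}
	for _, t := range tags {
		fmt.Println(t)
	}
}
```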
+ +package remote + +import ( + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" +) + +// MountableLayer wraps a v1.Layer in a shim that enables the layer to be +// "mounted" when published to another registry. +type MountableLayer struct { + v1.Layer + + Reference name.Reference +} + +// Descriptor retains the original descriptor from an image manifest. +// See partial.Descriptor. +func (ml *MountableLayer) Descriptor() (*v1.Descriptor, error) { + return partial.Descriptor(ml.Layer) +} + +// Exists is a hack. See partial.Exists. +func (ml *MountableLayer) Exists() (bool, error) { + return partial.Exists(ml.Layer) +} + +// mountableImage wraps the v1.Layer references returned by the embedded v1.Image +// in MountableLayer's so that remote.Write might attempt to mount them from their +// source repository. +type mountableImage struct { + v1.Image + + Reference name.Reference +} + +// Layers implements v1.Image +func (mi *mountableImage) Layers() ([]v1.Layer, error) { + ls, err := mi.Image.Layers() + if err != nil { + return nil, err + } + mls := make([]v1.Layer, 0, len(ls)) + for _, l := range ls { + mls = append(mls, &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }) + } + return mls, nil +} + +// LayerByDigest implements v1.Image +func (mi *mountableImage) LayerByDigest(d v1.Hash) (v1.Layer, error) { + l, err := mi.Image.LayerByDigest(d) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }, nil +} + +// LayerByDiffID implements v1.Image +func (mi *mountableImage) LayerByDiffID(d v1.Hash) (v1.Layer, error) { + l, err := mi.Image.LayerByDiffID(d) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }, nil +} + +// Descriptor retains the original descriptor from an index manifest. +// See partial.Descriptor. +func (mi *mountableImage) Descriptor() (*v1.Descriptor, error) { + return partial.Descriptor(mi.Image) +} + +// ConfigLayer retains the original reference so that it can be mounted. +// See partial.ConfigLayer. +func (mi *mountableImage) ConfigLayer() (v1.Layer, error) { + l, err := partial.ConfigLayer(mi.Image) + if err != nil { + return nil, err + } + return &MountableLayer{ + Layer: l, + Reference: mi.Reference, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go new file mode 100644 index 00000000..7f32413c --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go @@ -0,0 +1,302 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
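Because remote.Layer and the image wrappers above return MountableLayers that remember their source reference, pushing such a layer to a second repository on the same registry lets the writer attempt a cross-repo blob mount instead of a re-upload. A sketch with assumed registry and repository names and a placeholder digest:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	// Placeholder digest: substitute a real layer digest from the source repo.
	digest := "sha256:" + strings.Repeat("0", 64)

	src, err := name.NewDigest("registry.example.com/src/app@" + digest)
	if err != nil {
		panic(err)
	}

	// remote.Layer returns a MountableLayer that carries its source reference.
	layer, err := remote.Layer(src)
	if err != nil {
		panic(err)
	}

	dst, err := name.NewRepository("registry.example.com/dst/app")
	if err != nil {
		panic(err)
	}

	// Since the layer is mountable and both repos live on the same registry,
	// the writer can ask the registry to mount the blob rather than upload it.
	if err := remote.WriteLayer(dst, layer); err != nil {
		panic(err)
	}
	fmt.Println("layer mounted or uploaded")
}
```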
+ +package remote + +import ( + "context" + "fmt" + "net/http" + + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" + "golang.org/x/sync/errgroup" +) + +// MultiWrite writes the given Images or ImageIndexes to the given refs, as +// efficiently as possible, by deduping shared layer blobs and uploading layers +// in parallel, then uploading all manifests in parallel. +// +// Current limitations: +// - All refs must share the same repository. +// - Images cannot consist of stream.Layers. +func MultiWrite(m map[name.Reference]Taggable, options ...Option) (rerr error) { + // Determine the repository being pushed to; if asked to push to + // multiple repositories, give up. + var repo, zero name.Repository + for ref := range m { + if repo == zero { + repo = ref.Context() + } else if ref.Context() != repo { + return fmt.Errorf("MultiWrite can only push to the same repository (saw %q and %q)", repo, ref.Context()) + } + } + + o, err := makeOptions(repo, options...) + if err != nil { + return err + } + + // Collect unique blobs (layers and config blobs). + blobs := map[v1.Hash]v1.Layer{} + newManifests := []map[name.Reference]Taggable{} + // Separate originally requested images and indexes, so we can push images first. + images, indexes := map[name.Reference]Taggable{}, map[name.Reference]Taggable{} + for ref, i := range m { + if img, ok := i.(v1.Image); ok { + images[ref] = i + if err := addImageBlobs(img, blobs, o.allowNondistributableArtifacts); err != nil { + return err + } + continue + } + if idx, ok := i.(v1.ImageIndex); ok { + indexes[ref] = i + newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, 0, o.allowNondistributableArtifacts) + if err != nil { + return err + } + continue + } + return fmt.Errorf("pushable resource was not Image or ImageIndex: %T", i) + } + + // Determine if any of the layers are Mountable, because if so we need + // to request Pull scope too. + ls := []v1.Layer{} + for _, l := range blobs { + ls = append(ls, l) + } + scopes := scopesForUploadingImage(repo, ls) + tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes) + if err != nil { + return err + } + w := writer{ + repo: repo, + client: &http.Client{Transport: tr}, + backoff: o.retryBackoff, + predicate: o.retryPredicate, + } + + // Collect the total size of blobs and manifests we're about to write. + if o.updates != nil { + w.progress = &progress{updates: o.updates} + w.progress.lastUpdate = &v1.Update{} + defer close(o.updates) + defer func() { _ = w.progress.err(rerr) }() + for _, b := range blobs { + size, err := b.Size() + if err != nil { + return err + } + w.progress.total(size) + } + countManifest := func(t Taggable) error { + b, err := t.RawManifest() + if err != nil { + return err + } + w.progress.total(int64(len(b))) + return nil + } + for _, i := range images { + if err := countManifest(i); err != nil { + return err + } + } + for _, nm := range newManifests { + for _, i := range nm { + if err := countManifest(i); err != nil { + return err + } + } + } + for _, i := range indexes { + if err := countManifest(i); err != nil { + return err + } + } + } + + // Upload individual blobs and collect any errors. 
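+	// A single producer goroutine feeds blobChan while o.jobs workers drain
+	// it; the errgroup cancels all workers as soon as one upload fails.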
+ blobChan := make(chan v1.Layer, 2*o.jobs) + ctx := o.context + g, gctx := errgroup.WithContext(o.context) + for i := 0; i < o.jobs; i++ { + // Start N workers consuming blobs to upload. + g.Go(func() error { + for b := range blobChan { + if err := w.uploadOne(gctx, b); err != nil { + return err + } + } + return nil + }) + } + g.Go(func() error { + defer close(blobChan) + for _, b := range blobs { + select { + case blobChan <- b: + case <-gctx.Done(): + return gctx.Err() + } + } + return nil + }) + if err := g.Wait(); err != nil { + return err + } + + commitMany := func(ctx context.Context, m map[name.Reference]Taggable) error { + g, ctx := errgroup.WithContext(ctx) + // With all of the constituent elements uploaded, upload the manifests + // to commit the images and indexes, and collect any errors. + type task struct { + i Taggable + ref name.Reference + } + taskChan := make(chan task, 2*o.jobs) + for i := 0; i < o.jobs; i++ { + // Start N workers consuming tasks to upload manifests. + g.Go(func() error { + for t := range taskChan { + if err := w.commitManifest(ctx, t.i, t.ref); err != nil { + return err + } + } + return nil + }) + } + go func() { + for ref, i := range m { + taskChan <- task{i, ref} + } + close(taskChan) + }() + return g.Wait() + } + // Push originally requested image manifests. These have no + // dependencies. + if err := commitMany(ctx, images); err != nil { + return err + } + // Push new manifests from lowest levels up. + for i := len(newManifests) - 1; i >= 0; i-- { + if err := commitMany(ctx, newManifests[i]); err != nil { + return err + } + } + // Push originally requested index manifests, which might depend on + // newly discovered manifests. + + return commitMany(ctx, indexes) +} + +// addIndexBlobs adds blobs to the set of blobs we intend to upload, and +// returns the latest copy of the ordered collection of manifests to upload. +func addIndexBlobs(idx v1.ImageIndex, blobs map[v1.Hash]v1.Layer, repo name.Repository, newManifests []map[name.Reference]Taggable, lvl int, allowNondistributableArtifacts bool) ([]map[name.Reference]Taggable, error) { + if lvl > len(newManifests)-1 { + newManifests = append(newManifests, map[name.Reference]Taggable{}) + } + + im, err := idx.IndexManifest() + if err != nil { + return nil, err + } + for _, desc := range im.Manifests { + switch desc.MediaType { + case types.OCIImageIndex, types.DockerManifestList: + idx, err := idx.ImageIndex(desc.Digest) + if err != nil { + return nil, err + } + newManifests, err = addIndexBlobs(idx, blobs, repo, newManifests, lvl+1, allowNondistributableArtifacts) + if err != nil { + return nil, err + } + + // Also track the sub-index manifest to upload later by digest. + newManifests[lvl][repo.Digest(desc.Digest.String())] = idx + case types.OCIManifestSchema1, types.DockerManifestSchema2: + img, err := idx.Image(desc.Digest) + if err != nil { + return nil, err + } + if err := addImageBlobs(img, blobs, allowNondistributableArtifacts); err != nil { + return nil, err + } + + // Also track the sub-image manifest to upload later by digest. + newManifests[lvl][repo.Digest(desc.Digest.String())] = img + default: + // Workaround for #819. 
+ if wl, ok := idx.(withLayer); ok { + layer, err := wl.Layer(desc.Digest) + if err != nil { + return nil, err + } + if err := addLayerBlob(layer, blobs, allowNondistributableArtifacts); err != nil { + return nil, err + } + } else { + return nil, fmt.Errorf("unknown media type: %v", desc.MediaType) + } + } + } + return newManifests, nil +} + +func addLayerBlob(l v1.Layer, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error { + // Ignore foreign layers. + mt, err := l.MediaType() + if err != nil { + return err + } + + if mt.IsDistributable() || allowNondistributableArtifacts { + d, err := l.Digest() + if err != nil { + return err + } + + blobs[d] = l + } + + return nil +} + +func addImageBlobs(img v1.Image, blobs map[v1.Hash]v1.Layer, allowNondistributableArtifacts bool) error { + ls, err := img.Layers() + if err != nil { + return err + } + // Collect all layers. + for _, l := range ls { + if err := addLayerBlob(l, blobs, allowNondistributableArtifacts); err != nil { + return err + } + } + + // Collect config blob. + cl, err := partial.ConfigLayer(img) + if err != nil { + return err + } + return addLayerBlob(cl, blobs, allowNondistributableArtifacts) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go new file mode 100644 index 00000000..91b9005c --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go @@ -0,0 +1,305 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "context" + "errors" + "io" + "net" + "net/http" + "syscall" + "time" + + "github.com/google/go-containerregistry/internal/retry" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/logs" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +// Option is a functional option for remote operations. +type Option func(*options) error + +type options struct { + auth authn.Authenticator + keychain authn.Keychain + transport http.RoundTripper + platform v1.Platform + context context.Context + jobs int + userAgent string + allowNondistributableArtifacts bool + updates chan<- v1.Update + pageSize int + retryBackoff Backoff + retryPredicate retry.Predicate +} + +var defaultPlatform = v1.Platform{ + Architecture: "amd64", + OS: "linux", +} + +// Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib +type Backoff = retry.Backoff + +var defaultRetryPredicate retry.Predicate = func(err error) bool { + // Various failure modes here, as we're often reading from and writing to + // the network. 
+ if retry.IsTemporary(err) || errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, io.EOF) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET) { + logs.Warn.Printf("retrying %v", err) + return true + } + return false +} + +// Try this three times, waiting 1s after first failure, 3s after second. +var defaultRetryBackoff = Backoff{ + Duration: 1.0 * time.Second, + Factor: 3.0, + Jitter: 0.1, + Steps: 3, +} + +// Useful for tests +var fastBackoff = Backoff{ + Duration: 1.0 * time.Millisecond, + Factor: 3.0, + Jitter: 0.1, + Steps: 3, +} + +var retryableStatusCodes = []int{ + http.StatusRequestTimeout, + http.StatusInternalServerError, + http.StatusBadGateway, + http.StatusServiceUnavailable, + http.StatusGatewayTimeout, +} + +const ( + defaultJobs = 4 + + // ECR returns an error if n > 1000: + // https://github.com/google/go-containerregistry/issues/1091 + defaultPageSize = 1000 +) + +// DefaultTransport is based on http.DefaultTransport with modifications +// documented inline below. +var DefaultTransport http.RoundTripper = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, +} + +func makeOptions(target authn.Resource, opts ...Option) (*options, error) { + o := &options{ + transport: DefaultTransport, + platform: defaultPlatform, + context: context.Background(), + jobs: defaultJobs, + pageSize: defaultPageSize, + retryPredicate: defaultRetryPredicate, + retryBackoff: defaultRetryBackoff, + } + + for _, option := range opts { + if err := option(o); err != nil { + return nil, err + } + } + + switch { + case o.auth != nil && o.keychain != nil: + // It is a better experience to explicitly tell a caller their auth is misconfigured + // than potentially fail silently when the correct auth is overridden by option misuse. + return nil, errors.New("provide an option for either authn.Authenticator or authn.Keychain, not both") + case o.keychain != nil: + auth, err := o.keychain.Resolve(target) + if err != nil { + return nil, err + } + o.auth = auth + case o.auth == nil: + o.auth = authn.Anonymous + } + + // transport.Wrapper is a signal that consumers are opt-ing into providing their own transport without any additional wrapping. + // This is to allow consumers full control over the transports logic, such as providing retry logic. + if _, ok := o.transport.(*transport.Wrapper); !ok { + // Wrap the transport in something that logs requests and responses. + // It's expensive to generate the dumps, so skip it if we're writing + // to nothing. + if logs.Enabled(logs.Debug) { + o.transport = transport.NewLogger(o.transport) + } + + // Wrap the transport in something that can retry network flakes. + o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(retryableStatusCodes...)) + + // Wrap this last to prevent transport.New from double-wrapping. + if o.userAgent != "" { + o.transport = transport.NewUserAgent(o.transport, o.userAgent) + } + } + + return o, nil +} + +// WithTransport is a functional option for overriding the default transport +// for remote operations. +// If transport.Wrapper is provided, this signals that the consumer does *not* want any further wrapping to occur. +// i.e. 
logging, retry and useragent +// +// The default transport is DefaultTransport. +func WithTransport(t http.RoundTripper) Option { + return func(o *options) error { + o.transport = t + return nil + } +} + +// WithAuth is a functional option for overriding the default authenticator +// for remote operations. +// It is an error to use both WithAuth and WithAuthFromKeychain in the same Option set. +// +// The default authenticator is authn.Anonymous. +func WithAuth(auth authn.Authenticator) Option { + return func(o *options) error { + o.auth = auth + return nil + } +} + +// WithAuthFromKeychain is a functional option for overriding the default +// authenticator for remote operations, using an authn.Keychain to find +// credentials. +// It is an error to use both WithAuth and WithAuthFromKeychain in the same Option set. +// +// The default authenticator is authn.Anonymous. +func WithAuthFromKeychain(keys authn.Keychain) Option { + return func(o *options) error { + o.keychain = keys + return nil + } +} + +// WithPlatform is a functional option for overriding the default platform +// that Image and Descriptor.Image use for resolving an index to an image. +// +// The default platform is amd64/linux. +func WithPlatform(p v1.Platform) Option { + return func(o *options) error { + o.platform = p + return nil + } +} + +// WithContext is a functional option for setting the context in http requests +// performed by a given function. Note that this context is used for _all_ +// http requests, not just the initial volley. E.g., for remote.Image, the +// context will be set on http requests generated by subsequent calls to +// RawConfigFile() and even methods on layers returned by Layers(). +// +// The default context is context.Background(). +func WithContext(ctx context.Context) Option { + return func(o *options) error { + o.context = ctx + return nil + } +} + +// WithJobs is a functional option for setting the parallelism of remote +// operations performed by a given function. Note that not all remote +// operations support parallelism. +// +// The default value is 4. +func WithJobs(jobs int) Option { + return func(o *options) error { + if jobs <= 0 { + return errors.New("jobs must be greater than zero") + } + o.jobs = jobs + return nil + } +} + +// WithUserAgent adds the given string to the User-Agent header for any HTTP +// requests. This header will also include "go-containerregistry/${version}". +// +// If you want to completely overwrite the User-Agent header, use WithTransport. +func WithUserAgent(ua string) Option { + return func(o *options) error { + o.userAgent = ua + return nil + } +} + +// WithNondistributable includes non-distributable (foreign) layers +// when writing images, see: +// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers +// +// The default behaviour is to skip these layers +func WithNondistributable(o *options) error { + o.allowNondistributableArtifacts = true + return nil +} + +// WithProgress takes a channel that will receive progress updates as bytes are written. +// +// Sending updates to an unbuffered channel will block writes, so callers +// should provide a buffered channel to avoid potential deadlocks. +func WithProgress(updates chan<- v1.Update) Option { + return func(o *options) error { + o.updates = updates + return nil + } +} + +// WithPageSize sets the given size as the value of parameter 'n' in the request. +// +// To omit the `n` parameter entirely, use WithPageSize(0). +// The default value is 1000. 
+func WithPageSize(size int) Option { + return func(o *options) error { + o.pageSize = size + return nil + } +} + +// WithRetryBackoff sets the httpBackoff for retry HTTP operations. +func WithRetryBackoff(backoff Backoff) Option { + return func(o *options) error { + o.retryBackoff = backoff + return nil + } +} + +// WithRetryPredicate sets the predicate for retry HTTP operations. +func WithRetryPredicate(predicate retry.Predicate) Option { + return func(o *options) error { + o.retryPredicate = predicate + return nil + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go new file mode 100644 index 00000000..1f439635 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/progress.go @@ -0,0 +1,69 @@ +// Copyright 2022 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "io" + "sync" + "sync/atomic" + + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +type progress struct { + sync.Mutex + updates chan<- v1.Update + lastUpdate *v1.Update +} + +func (p *progress) total(delta int64) { + atomic.AddInt64(&p.lastUpdate.Total, delta) +} + +func (p *progress) complete(delta int64) { + p.Lock() + defer p.Unlock() + p.updates <- v1.Update{ + Total: p.lastUpdate.Total, + Complete: atomic.AddInt64(&p.lastUpdate.Complete, delta), + } +} + +func (p *progress) err(err error) error { + if err != nil && p.updates != nil { + p.updates <- v1.Update{Error: err} + } + return err +} + +type progressReader struct { + rc io.ReadCloser + + count *int64 // number of bytes this reader has read, to support resetting on retry. + progress *progress +} + +func (r *progressReader) Read(b []byte) (int, error) { + n, err := r.rc.Read(b) + if err != nil { + return n, err + } + atomic.AddInt64(r.count, int64(n)) + // TODO: warn/debug log if sending takes too long, or if sending is blocked while context is canceled. + r.progress.complete(int64(n)) + return n, nil +} + +func (r *progressReader) Close() error { return r.rc.Close() } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md new file mode 100644 index 00000000..bd4d957b --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md @@ -0,0 +1,129 @@ +# `transport` + +[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport) + +The [distribution protocol](https://github.com/opencontainers/distribution-spec) is fairly simple, but correctly [implementing authentication](../../../authn/README.md) is **hard**. 
+
+This package [implements](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#New) an [`http.RoundTripper`](https://godoc.org/net/http#RoundTripper)
+that transparently performs:
+* [Token
+Authentication](https://docs.docker.com/registry/spec/auth/token/) and
+* [OAuth2
+Authentication](https://docs.docker.com/registry/spec/auth/oauth/)
+
+for registry clients.
+
+## Raison d'être
+
+> Why not just use the [`docker/distribution`](https://godoc.org/github.com/docker/distribution/registry/client/auth) client?
+
+Great question! Mostly, because I don't want to depend on [`prometheus/client_golang`](https://github.com/prometheus/client_golang).
+
+As a performance optimization, that client uses [a cache](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/client/repository.go#L173) to keep track of a mapping between blob digests and their [descriptors](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/blobs.go#L57-L86). Unfortunately, the cache [uses prometheus](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/storage/cache/cachedblobdescriptorstore.go#L44) to track hits and misses, so if you want to use that client you have to pull in all of prometheus, which is pretty large.
+
+![docker/distribution](../../../../images/docker.dot.svg)
+
+> Why does it matter if you depend on prometheus? Who cares?
+
+It's generally polite to your downstream to reduce the number of dependencies your package requires:
+
+* Downloading your package is faster, which helps our Australian friends and people on airplanes.
+* There is less code to compile, which speeds up builds and saves the planet from global warming.
+* You reduce the likelihood of inflicting dependency hell upon your consumers.
+* [Tim Hockin](https://twitter.com/thockin/status/958606077456654336) prefers it based on his experience working on Kubernetes, and he's a pretty smart guy.
+
+> Okay, what about [`containerd/containerd`](https://godoc.org/github.com/containerd/containerd/remotes/docker)?
+
+Similar reasons! That ends up pulling in grpc, protobuf, and logrus.
+
+![containerd/containerd](../../../../images/containerd.dot.svg)
+
+> Well... what about [`containers/image`](https://godoc.org/github.com/containers/image/docker)?
+
+That just uses the `docker/distribution` client... and more!
+
+![containers/image](../../../../images/containers.dot.svg)
+
+> Wow, what about this package?
+
+Of course, this package isn't perfect either. `transport` depends on `authn`,
+which in turn depends on docker's config file parsing and handling package,
+which you don't strictly need but almost certainly want if you're going to be
+interacting with a registry.
+
+![google/go-containerregistry](../../../../images/ggcr.dot.svg)
+
+*These graphs were generated by
+[`kisielk/godepgraph`](https://github.com/kisielk/godepgraph).*
+
+## Usage
+
+This is heavily used by the
+[`remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote)
+package, which implements higher-level image-centric functionality, but this
+package is useful if you want to interact directly with the registry to do
+something that `remote` doesn't support, e.g. [to handle schema 1
+images](https://github.com/google/go-containerregistry/pull/509).
+ +This package also includes some [error +handling](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#errors) +facilities in the form of +[`CheckError`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#CheckError), +which will parse the response body into a structured error for unexpected http +status codes. + +Here's a "simple" program that writes the result of +[listing tags](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#tags) +for [`gcr.io/google-containers/pause`](https://gcr.io/google-containers/pause) +to stdout. + +```go +package main + +import ( + "io" + "net/http" + "os" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" +) + +func main() { + repo, err := name.NewRepository("gcr.io/google-containers/pause") + if err != nil { + panic(err) + } + + // Fetch credentials based on your docker config file, which is $HOME/.docker/config.json or $DOCKER_CONFIG. + auth, err := authn.DefaultKeychain.Resolve(repo.Registry) + if err != nil { + panic(err) + } + + // Construct an http.Client that is authorized to pull from gcr.io/google-containers/pause. + scopes := []string{repo.Scope(transport.PullScope)} + t, err := transport.New(repo.Registry, auth, http.DefaultTransport, scopes) + if err != nil { + panic(err) + } + client := &http.Client{Transport: t} + + // Make the actual request. + resp, err := client.Get("https://gcr.io/v2/google-containers/pause/tags/list") + if err != nil { + panic(err) + } + + // Assert that we get a 200, otherwise attempt to parse body as a structured error. + if err := transport.CheckError(resp, http.StatusOK); err != nil { + panic(err) + } + + // Write the response to stdout. + if _, err := io.Copy(os.Stdout, resp.Body); err != nil { + panic(err) + } +} +``` diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go new file mode 100644 index 00000000..fdb362b7 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go @@ -0,0 +1,62 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "encoding/base64" + "fmt" + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" +) + +type basicTransport struct { + inner http.RoundTripper + auth authn.Authenticator + target string +} + +var _ http.RoundTripper = (*basicTransport)(nil) + +// RoundTrip implements http.RoundTripper +func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) { + if bt.auth != authn.Anonymous { + auth, err := bt.auth.Authorization() + if err != nil { + return nil, err + } + + // http.Client handles redirects at a layer above the http.RoundTripper + // abstraction, so to avoid forwarding Authorization headers to places + // we are redirected, only set it when the authorization header matches + // the host with which we are interacting. + // In case of redirect http.Client can use an empty Host, check URL too. + if in.Host == bt.target || in.URL.Host == bt.target { + if bearer := auth.RegistryToken; bearer != "" { + hdr := fmt.Sprintf("Bearer %s", bearer) + in.Header.Set("Authorization", hdr) + } else if user, pass := auth.Username, auth.Password; user != "" && pass != "" { + delimited := fmt.Sprintf("%s:%s", user, pass) + encoded := base64.StdEncoding.EncodeToString([]byte(delimited)) + hdr := fmt.Sprintf("Basic %s", encoded) + in.Header.Set("Authorization", hdr) + } else if token := auth.Auth; token != "" { + hdr := fmt.Sprintf("Basic %s", token) + in.Header.Set("Authorization", hdr) + } + } + } + return bt.inner.RoundTrip(in) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go new file mode 100644 index 00000000..ea07ff6a --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go @@ -0,0 +1,320 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strings" + + authchallenge "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/google/go-containerregistry/internal/redact" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" +) + +type bearerTransport struct { + // Wrapped by bearerTransport. + inner http.RoundTripper + // Basic credentials that we exchange for bearer tokens. + basic authn.Authenticator + // Holds the bearer response from the token service. + bearer authn.AuthConfig + // Registry to which we send bearer tokens. + registry name.Registry + // See https://tools.ietf.org/html/rfc6750#section-3 + realm string + // See https://docs.docker.com/registry/spec/auth/token/ + service string + scopes []string + // Scheme we should use, determined by ping response. 
+	scheme string
+}
+
+var _ http.RoundTripper = (*bearerTransport)(nil)
+
+var portMap = map[string]string{
+	"http":  "80",
+	"https": "443",
+}
+
+func stringSet(ss []string) map[string]struct{} {
+	set := make(map[string]struct{})
+	for _, s := range ss {
+		set[s] = struct{}{}
+	}
+	return set
+}
+
+// RoundTrip implements http.RoundTripper
+func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) {
+	sendRequest := func() (*http.Response, error) {
+		// http.Client handles redirects at a layer above the http.RoundTripper
+		// abstraction, so to avoid forwarding Authorization headers to places
+		// we are redirected, only set it when the authorization header matches
+		// the registry with which we are interacting.
+		// In case of redirect http.Client can use an empty Host, check URL too.
+		if matchesHost(bt.registry, in, bt.scheme) {
+			hdr := fmt.Sprintf("Bearer %s", bt.bearer.RegistryToken)
+			in.Header.Set("Authorization", hdr)
+		}
+		return bt.inner.RoundTrip(in)
+	}
+
+	res, err := sendRequest()
+	if err != nil {
+		return nil, err
+	}
+
+	// If we hit a WWW-Authenticate challenge, it might be due to expired tokens or insufficient scope.
+	if challenges := authchallenge.ResponseChallenges(res); len(challenges) != 0 {
+		// close out old response, since we will not return it.
+		res.Body.Close()
+
+		newScopes := []string{}
+		for _, wac := range challenges {
+			// TODO(jonjohnsonjr): Should we also update "realm" or "service"?
+			if want, ok := wac.Parameters["scope"]; ok {
+				// Add any scopes that we don't already request.
+				got := stringSet(bt.scopes)
+				if _, ok := got[want]; !ok {
+					newScopes = append(newScopes, want)
+				}
+			}
+		}
+
+		// Some registries seem to only look at the first scope parameter during a token exchange.
+		// If a request fails because it's missing a scope, we should put those at the beginning,
+		// otherwise the registry might just ignore it :/
+		newScopes = append(newScopes, bt.scopes...)
+		bt.scopes = newScopes
+
+		// TODO(jonjohnsonjr): Teach transport.Error about "error" and "error_description" from challenge.
+
+		// Retry the request to attempt to get a valid token.
+		if err = bt.refresh(in.Context()); err != nil {
+			return nil, err
+		}
+		return sendRequest()
+	}
+
+	return res, err
+}
+
+// It's unclear which authentication flow to use based purely on the protocol,
+// so we rely on heuristics and fallbacks to support as many registries as possible.
+// The basic token exchange is attempted first, falling back to the oauth flow.
+// If the IdentityToken is set, this indicates that we should start with the oauth flow.
+func (bt *bearerTransport) refresh(ctx context.Context) error {
+	auth, err := bt.basic.Authorization()
+	if err != nil {
+		return err
+	}
+
+	if auth.RegistryToken != "" {
+		bt.bearer.RegistryToken = auth.RegistryToken
+		return nil
+	}
+
+	var content []byte
+	if auth.IdentityToken != "" {
+		// If the secret being stored is an identity token,
+		// the Username should be set to <token>, which indicates
+		// we are using an oauth flow.
+		content, err = bt.refreshOauth(ctx)
+		var terr *Error
+		if errors.As(err, &terr) && terr.StatusCode == http.StatusNotFound {
+			// Note: Not all token servers implement oauth2.
+			// If the request to the endpoint returns 404 using the HTTP POST method,
+			// refer to Token Documentation for using the HTTP GET method supported by all token servers.
+ content, err = bt.refreshBasic(ctx) + } + } else { + content, err = bt.refreshBasic(ctx) + } + if err != nil { + return err + } + + // Some registries don't have "token" in the response. See #54. + type tokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + // TODO: handle expiry? + } + + var response tokenResponse + if err := json.Unmarshal(content, &response); err != nil { + return err + } + + // Some registries set access_token instead of token. + if response.AccessToken != "" { + response.Token = response.AccessToken + } + + // Find a token to turn into a Bearer authenticator + if response.Token != "" { + bt.bearer.RegistryToken = response.Token + } else { + return fmt.Errorf("no token in bearer response:\n%s", content) + } + + // If we obtained a refresh token from the oauth flow, use that for refresh() now. + if response.RefreshToken != "" { + bt.basic = authn.FromConfig(authn.AuthConfig{ + IdentityToken: response.RefreshToken, + }) + } + + return nil +} + +func matchesHost(reg name.Registry, in *http.Request, scheme string) bool { + canonicalHeaderHost := canonicalAddress(in.Host, scheme) + canonicalURLHost := canonicalAddress(in.URL.Host, scheme) + canonicalRegistryHost := canonicalAddress(reg.RegistryStr(), scheme) + return canonicalHeaderHost == canonicalRegistryHost || canonicalURLHost == canonicalRegistryHost +} + +func canonicalAddress(host, scheme string) (address string) { + // The host may be any one of: + // - hostname + // - hostname:port + // - ipv4 + // - ipv4:port + // - ipv6 + // - [ipv6]:port + // As net.SplitHostPort returns an error if the host does not contain a port, we should only attempt + // to call it when we know that the address contains a port + if strings.Count(host, ":") == 1 || (strings.Count(host, ":") >= 2 && strings.Contains(host, "]:")) { + hostname, port, err := net.SplitHostPort(host) + if err != nil { + return host + } + if port == "" { + port = portMap[scheme] + } + + return net.JoinHostPort(hostname, port) + } + + return net.JoinHostPort(host, portMap[scheme]) +} + +// https://docs.docker.com/registry/spec/auth/oauth/ +func (bt *bearerTransport) refreshOauth(ctx context.Context) ([]byte, error) { + auth, err := bt.basic.Authorization() + if err != nil { + return nil, err + } + + u, err := url.Parse(bt.realm) + if err != nil { + return nil, err + } + + v := url.Values{} + v.Set("scope", strings.Join(bt.scopes, " ")) + if bt.service != "" { + v.Set("service", bt.service) + } + v.Set("client_id", defaultUserAgent) + if auth.IdentityToken != "" { + v.Set("grant_type", "refresh_token") + v.Set("refresh_token", auth.IdentityToken) + } else if auth.Username != "" && auth.Password != "" { + // TODO(#629): This is unreachable. + v.Set("grant_type", "password") + v.Set("username", auth.Username) + v.Set("password", auth.Password) + v.Set("access_type", "offline") + } + + client := http.Client{Transport: bt.inner} + req, err := http.NewRequest(http.MethodPost, u.String(), strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + // We don't want to log credentials. 
+ ctx = redact.NewContext(ctx, "oauth token response contains credentials") + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := CheckError(resp, http.StatusOK); err != nil { + if bt.basic == authn.Anonymous { + logs.Warn.Printf("No matching credentials were found for %q", bt.registry) + } + return nil, err + } + + return io.ReadAll(resp.Body) +} + +// https://docs.docker.com/registry/spec/auth/token/ +func (bt *bearerTransport) refreshBasic(ctx context.Context) ([]byte, error) { + u, err := url.Parse(bt.realm) + if err != nil { + return nil, err + } + b := &basicTransport{ + inner: bt.inner, + auth: bt.basic, + target: u.Host, + } + client := http.Client{Transport: b} + + v := u.Query() + v["scope"] = bt.scopes + v.Set("service", bt.service) + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + + // We don't want to log credentials. + ctx = redact.NewContext(ctx, "basic token response contains credentials") + + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := CheckError(resp, http.StatusOK); err != nil { + if bt.basic == authn.Anonymous { + logs.Warn.Printf("No matching credentials were found for %q", bt.registry) + } + return nil, err + } + + return io.ReadAll(resp.Body) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go new file mode 100644 index 00000000..ff7025b5 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides facilities for setting up an authenticated +// http.RoundTripper given an Authenticator and base RoundTripper. See +// transport.New for more information. +package transport diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go new file mode 100644 index 00000000..c0e43373 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go @@ -0,0 +1,173 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + + "github.com/google/go-containerregistry/internal/redact" +) + +// Error implements error to support the following error specification: +// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors +type Error struct { + Errors []Diagnostic `json:"errors,omitempty"` + // The http status code returned. + StatusCode int + // The request that failed. + Request *http.Request + // The raw body if we couldn't understand it. + rawBody string +} + +// Check that Error implements error +var _ error = (*Error)(nil) + +// Error implements error +func (e *Error) Error() string { + prefix := "" + if e.Request != nil { + prefix = fmt.Sprintf("%s %s: ", e.Request.Method, redact.URL(e.Request.URL)) + } + return prefix + e.responseErr() +} + +func (e *Error) responseErr() string { + switch len(e.Errors) { + case 0: + if len(e.rawBody) == 0 { + if e.Request != nil && e.Request.Method == http.MethodHead { + return fmt.Sprintf("unexpected status code %d %s (HEAD responses have no body, use GET for details)", e.StatusCode, http.StatusText(e.StatusCode)) + } + return fmt.Sprintf("unexpected status code %d %s", e.StatusCode, http.StatusText(e.StatusCode)) + } + return fmt.Sprintf("unexpected status code %d %s: %s", e.StatusCode, http.StatusText(e.StatusCode), e.rawBody) + case 1: + return e.Errors[0].String() + default: + var errors []string + for _, d := range e.Errors { + errors = append(errors, d.String()) + } + return fmt.Sprintf("multiple errors returned: %s", + strings.Join(errors, "; ")) + } +} + +// Temporary returns whether the request that preceded the error is temporary. +func (e *Error) Temporary() bool { + if len(e.Errors) == 0 { + _, ok := temporaryStatusCodes[e.StatusCode] + return ok + } + for _, d := range e.Errors { + if _, ok := temporaryErrorCodes[d.Code]; !ok { + return false + } + } + return true +} + +// Diagnostic represents a single error returned by a Docker registry interaction. +type Diagnostic struct { + Code ErrorCode `json:"code"` + Message string `json:"message,omitempty"` + Detail any `json:"detail,omitempty"` +} + +// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail] +func (d Diagnostic) String() string { + msg := fmt.Sprintf("%s: %s", d.Code, d.Message) + if d.Detail != nil { + msg = fmt.Sprintf("%s; %v", msg, d.Detail) + } + return msg +} + +// ErrorCode is an enumeration of supported error codes. 
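The value of this structured `Error` type is that registry failures can be inspected rather than string-matched. A hedged sketch of how a caller might branch on it; the `crane` entry point and the image reference are illustrative assumptions, not part of this file:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/google/go-containerregistry/pkg/crane"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	// Hypothetical reference; any non-2xx registry response surfaces as *transport.Error.
	_, err := crane.Manifest("registry.example.com/missing/repo:latest")

	var terr *transport.Error
	if errors.As(err, &terr) {
		fmt.Println("status:", terr.StatusCode)
		for _, d := range terr.Errors {
			// d.Code is e.g. transport.ManifestUnknownErrorCode.
			fmt.Printf("%s: %s\n", d.Code, d.Message)
		}
		fmt.Println("worth retrying:", terr.Temporary())
	} else if err != nil {
		fmt.Println("non-registry error:", err)
	}
}
```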
+type ErrorCode string + +// The set of error conditions a registry may return: +// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2 +const ( + BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN" + BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID" + BlobUploadUnknownErrorCode ErrorCode = "BLOB_UPLOAD_UNKNOWN" + DigestInvalidErrorCode ErrorCode = "DIGEST_INVALID" + ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN" + ManifestInvalidErrorCode ErrorCode = "MANIFEST_INVALID" + ManifestUnknownErrorCode ErrorCode = "MANIFEST_UNKNOWN" + ManifestUnverifiedErrorCode ErrorCode = "MANIFEST_UNVERIFIED" + NameInvalidErrorCode ErrorCode = "NAME_INVALID" + NameUnknownErrorCode ErrorCode = "NAME_UNKNOWN" + SizeInvalidErrorCode ErrorCode = "SIZE_INVALID" + TagInvalidErrorCode ErrorCode = "TAG_INVALID" + UnauthorizedErrorCode ErrorCode = "UNAUTHORIZED" + DeniedErrorCode ErrorCode = "DENIED" + UnsupportedErrorCode ErrorCode = "UNSUPPORTED" + TooManyRequestsErrorCode ErrorCode = "TOOMANYREQUESTS" + UnknownErrorCode ErrorCode = "UNKNOWN" + + // This isn't defined by either docker or OCI spec, but is defined by docker/distribution: + // https://github.com/distribution/distribution/blob/6a977a5a754baa213041443f841705888107362a/registry/api/errcode/register.go#L60 + UnavailableErrorCode ErrorCode = "UNAVAILABLE" +) + +// TODO: Include other error types. +var temporaryErrorCodes = map[ErrorCode]struct{}{ + BlobUploadInvalidErrorCode: {}, + TooManyRequestsErrorCode: {}, + UnknownErrorCode: {}, + UnavailableErrorCode: {}, +} + +var temporaryStatusCodes = map[int]struct{}{ + http.StatusRequestTimeout: {}, + http.StatusInternalServerError: {}, + http.StatusBadGateway: {}, + http.StatusServiceUnavailable: {}, + http.StatusGatewayTimeout: {}, +} + +// CheckError returns a structured error if the response status is not in codes. +func CheckError(resp *http.Response, codes ...int) error { + for _, code := range codes { + if resp.StatusCode == code { + // This is one of the supported status codes. + return nil + } + } + b, err := io.ReadAll(resp.Body) + if err != nil { + return err + } + + // https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors + structuredError := &Error{} + + // This can fail if e.g. the response body is not valid JSON. That's fine, + // we'll construct an appropriate error string from the body and status code. + _ = json.Unmarshal(b, structuredError) + + structuredError.rawBody = string(b) + structuredError.StatusCode = resp.StatusCode + structuredError.Request = resp.Request + + return structuredError +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go new file mode 100644 index 00000000..c341f844 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go @@ -0,0 +1,91 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "net/http" + "net/http/httputil" + "time" + + "github.com/google/go-containerregistry/internal/redact" + "github.com/google/go-containerregistry/pkg/logs" +) + +type logTransport struct { + inner http.RoundTripper +} + +// NewLogger returns a transport that logs requests and responses to +// github.com/google/go-containerregistry/pkg/logs.Debug. +func NewLogger(inner http.RoundTripper) http.RoundTripper { + return &logTransport{inner} +} + +func (t *logTransport) RoundTrip(in *http.Request) (out *http.Response, err error) { + // Inspired by: github.com/motemen/go-loghttp + + // We redact token responses and binary blobs in response/request. + omitBody, reason := redact.FromContext(in.Context()) + if omitBody { + logs.Debug.Printf("--> %s %s [body redacted: %s]", in.Method, in.URL, reason) + } else { + logs.Debug.Printf("--> %s %s", in.Method, in.URL) + } + + // Save these headers so we can redact Authorization. + savedHeaders := in.Header.Clone() + if in.Header != nil && in.Header.Get("authorization") != "" { + in.Header.Set("authorization", "") + } + + b, err := httputil.DumpRequestOut(in, !omitBody) + if err == nil { + logs.Debug.Println(string(b)) + } else { + logs.Debug.Printf("Failed to dump request %s %s: %v", in.Method, in.URL, err) + } + + // Restore the non-redacted headers. + in.Header = savedHeaders + + start := time.Now() + out, err = t.inner.RoundTrip(in) + duration := time.Since(start) + if err != nil { + logs.Debug.Printf("<-- %v %s %s (%s)", err, in.Method, in.URL, duration) + } + if out != nil { + msg := fmt.Sprintf("<-- %d", out.StatusCode) + if out.Request != nil { + msg = fmt.Sprintf("%s %s", msg, out.Request.URL) + } + msg = fmt.Sprintf("%s (%s)", msg, duration) + + if omitBody { + msg = fmt.Sprintf("%s [body redacted: %s]", msg, reason) + } + + logs.Debug.Print(msg) + + b, err := httputil.DumpResponse(out, !omitBody) + if err == nil { + logs.Debug.Println(string(b)) + } else { + logs.Debug.Printf("Failed to dump response %s %s: %v", in.Method, in.URL, err) + } + } + return +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go new file mode 100644 index 00000000..d852ef84 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go @@ -0,0 +1,227 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
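To actually see the output of the logging transport above, `logs.Debug` has to be pointed somewhere, since it discards output by default. A minimal sketch, assuming the `crane` package is available to drive a request; the image reference is only an example:

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/google/go-containerregistry/pkg/crane"
	"github.com/google/go-containerregistry/pkg/logs"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	// logs.Debug writes to io.Discard unless redirected.
	logs.Debug = log.New(os.Stderr, "DEBUG ", log.LstdFlags)

	// Wrap the default transport so every request/response pair is dumped
	// (with Authorization headers and redacted bodies scrubbed, as above).
	rt := transport.NewLogger(http.DefaultTransport)

	if _, err := crane.Manifest("gcr.io/google-containers/pause:latest", crane.WithTransport(rt)); err != nil {
		log.Fatal(err)
	}
}
```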
+ +package transport + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "strings" + "time" + + authchallenge "github.com/docker/distribution/registry/client/auth/challenge" + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" +) + +type challenge string + +const ( + anonymous challenge = "anonymous" + basic challenge = "basic" + bearer challenge = "bearer" +) + +// 300ms is the default fallback period for go's DNS dialer but we could make this configurable. +var fallbackDelay = 300 * time.Millisecond + +type pingResp struct { + challenge challenge + + // Following the challenge there are often key/value pairs + // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz" + parameters map[string]string + + // The registry's scheme to use. Communicates whether we fell back to http. + scheme string +} + +func (c challenge) Canonical() challenge { + return challenge(strings.ToLower(string(c))) +} + +func ping(ctx context.Context, reg name.Registry, t http.RoundTripper) (*pingResp, error) { + // This first attempts to use "https" for every request, falling back to http + // if the registry matches our localhost heuristic or if it is intentionally + // set to insecure via name.NewInsecureRegistry. + schemes := []string{"https"} + if reg.Scheme() == "http" { + schemes = append(schemes, "http") + } + if len(schemes) == 1 { + return pingSingle(ctx, reg, t, schemes[0]) + } + return pingParallel(ctx, reg, t, schemes) +} + +func pingSingle(ctx context.Context, reg name.Registry, t http.RoundTripper, scheme string) (*pingResp, error) { + client := http.Client{Transport: t} + url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name()) + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + defer func() { + // By draining the body, make sure to reuse the connection made by + // the ping for the following access to the registry + io.Copy(io.Discard, resp.Body) + resp.Body.Close() + }() + + switch resp.StatusCode { + case http.StatusOK: + // If we get a 200, then no authentication is needed. + return &pingResp{ + challenge: anonymous, + scheme: scheme, + }, nil + case http.StatusUnauthorized: + if challenges := authchallenge.ResponseChallenges(resp); len(challenges) != 0 { + // If we hit more than one, let's try to find one that we know how to handle. + wac := pickFromMultipleChallenges(challenges) + return &pingResp{ + challenge: challenge(wac.Scheme).Canonical(), + parameters: wac.Parameters, + scheme: scheme, + }, nil + } + // Otherwise, just return the challenge without parameters. + return &pingResp{ + challenge: challenge(resp.Header.Get("WWW-Authenticate")).Canonical(), + scheme: scheme, + }, nil + default: + return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized) + } +} + +// Based on the golang happy eyeballs dialParallel impl in net/dial.go. 
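The scheme fallback in `ping` above is driven entirely by how the `name.Registry` was constructed. A small illustration of the two cases; the internal registry name is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	secure, err := name.NewRegistry("gcr.io")
	if err != nil {
		panic(err)
	}
	fmt.Println(secure.Scheme()) // "https": ping tries https only

	// Hypothetical in-cluster registry, explicitly marked insecure.
	insecure, err := name.NewInsecureRegistry("registry.internal:5000")
	if err != nil {
		panic(err)
	}
	fmt.Println(insecure.Scheme()) // "http": ping races https first, then http after fallbackDelay
}
```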
+func pingParallel(ctx context.Context, reg name.Registry, t http.RoundTripper, schemes []string) (*pingResp, error) { + returned := make(chan struct{}) + defer close(returned) + + type pingResult struct { + *pingResp + error + primary bool + done bool + } + + results := make(chan pingResult) + + startRacer := func(ctx context.Context, scheme string) { + pr, err := pingSingle(ctx, reg, t, scheme) + select { + case results <- pingResult{pingResp: pr, error: err, primary: scheme == "https", done: true}: + case <-returned: + if pr != nil { + logs.Debug.Printf("%s lost race", scheme) + } + } + } + + var primary, fallback pingResult + + primaryCtx, primaryCancel := context.WithCancel(ctx) + defer primaryCancel() + go startRacer(primaryCtx, schemes[0]) + + fallbackTimer := time.NewTimer(fallbackDelay) + defer fallbackTimer.Stop() + + for { + select { + case <-fallbackTimer.C: + fallbackCtx, fallbackCancel := context.WithCancel(ctx) + defer fallbackCancel() + go startRacer(fallbackCtx, schemes[1]) + + case res := <-results: + if res.error == nil { + return res.pingResp, nil + } + if res.primary { + primary = res + } else { + fallback = res + } + if primary.done && fallback.done { + return nil, multierrs([]error{primary.error, fallback.error}) + } + if res.primary && fallbackTimer.Stop() { + // Primary failed and we haven't started the fallback, + // reset time to start fallback immediately. + fallbackTimer.Reset(0) + } + } + } +} + +func pickFromMultipleChallenges(challenges []authchallenge.Challenge) authchallenge.Challenge { + // It might happen there are multiple www-authenticate headers, e.g. `Negotiate` and `Basic`. + // Picking simply the first one could result eventually in `unrecognized challenge` error, + // that's why we're looping through the challenges in search for one that can be handled. + allowedSchemes := []string{"basic", "bearer"} + + for _, wac := range challenges { + currentScheme := strings.ToLower(wac.Scheme) + for _, allowed := range allowedSchemes { + if allowed == currentScheme { + return wac + } + } + } + + return challenges[0] +} + +type multierrs []error + +func (m multierrs) Error() string { + var b strings.Builder + hasWritten := false + for _, err := range m { + if hasWritten { + b.WriteString("; ") + } + hasWritten = true + b.WriteString(err.Error()) + } + return b.String() +} + +func (m multierrs) As(target any) bool { + for _, err := range m { + if errors.As(err, target) { + return true + } + } + return false +} + +func (m multierrs) Is(target error) bool { + for _, err := range m { + if errors.Is(err, target) { + return true + } + } + return false +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go new file mode 100644 index 00000000..e5621e30 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go @@ -0,0 +1,111 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net/http" + "time" + + "github.com/google/go-containerregistry/internal/retry" +) + +// Sleep for 0.1 then 0.3 seconds. This should cover networking blips. +var defaultBackoff = retry.Backoff{ + Duration: 100 * time.Millisecond, + Factor: 3.0, + Jitter: 0.1, + Steps: 3, +} + +var _ http.RoundTripper = (*retryTransport)(nil) + +// retryTransport wraps a RoundTripper and retries temporary network errors. +type retryTransport struct { + inner http.RoundTripper + backoff retry.Backoff + predicate retry.Predicate + codes []int +} + +// Option is a functional option for retryTransport. +type Option func(*options) + +type options struct { + backoff retry.Backoff + predicate retry.Predicate + codes []int +} + +// Backoff is an alias of retry.Backoff to expose this configuration option to consumers of this lib +type Backoff = retry.Backoff + +// WithRetryBackoff sets the backoff for retry operations. +func WithRetryBackoff(backoff Backoff) Option { + return func(o *options) { + o.backoff = backoff + } +} + +// WithRetryPredicate sets the predicate for retry operations. +func WithRetryPredicate(predicate func(error) bool) Option { + return func(o *options) { + o.predicate = predicate + } +} + +// WithRetryStatusCodes sets which http response codes will be retried. +func WithRetryStatusCodes(codes ...int) Option { + return func(o *options) { + o.codes = codes + } +} + +// NewRetry returns a transport that retries errors. +func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper { + o := &options{ + backoff: defaultBackoff, + predicate: retry.IsTemporary, + } + + for _, opt := range opts { + opt(o) + } + + return &retryTransport{ + inner: inner, + backoff: o.backoff, + predicate: o.predicate, + codes: o.codes, + } +} + +func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err error) { + roundtrip := func() error { + out, err = t.inner.RoundTrip(in) + if !retry.Ever(in.Context()) { + return nil + } + if out != nil { + for _, code := range t.codes { + if out.StatusCode == code { + return CheckError(out) + } + } + } + return err + } + retry.Retry(roundtrip, t.predicate, t.backoff) + return +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go new file mode 100644 index 00000000..d70b6a85 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go @@ -0,0 +1,44 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net/http" + + "github.com/google/go-containerregistry/pkg/name" +) + +type schemeTransport struct { + // Scheme we should use, determined by ping response. + scheme string + + // Registry we're talking to. + registry name.Registry + + // Wrapped by schemeTransport. 
+ inner http.RoundTripper +} + +// RoundTrip implements http.RoundTripper +func (st *schemeTransport) RoundTrip(in *http.Request) (*http.Response, error) { + // When we ping() the registry, we determine whether to use http or https + // based on which scheme was successful. That is only valid for the + // registry server and not e.g. a separate token server or blob storage, + // so we should only override the scheme if the host is the registry. + if matchesHost(st.registry, in, st.scheme) { + in.URL.Scheme = st.scheme + } + return st.inner.RoundTrip(in) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go new file mode 100644 index 00000000..c3b56f7a --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go @@ -0,0 +1,24 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +// Scopes suitable to qualify each Repository +const ( + PullScope string = "pull" + PushScope string = "push,pull" + // For now DELETE is PUSH, which is the read/write ACL. + DeleteScope string = PushScope + CatalogScope string = "catalog" +) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go new file mode 100644 index 00000000..01fe1fa8 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go @@ -0,0 +1,116 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "fmt" + "net/http" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" +) + +// New returns a new RoundTripper based on the provided RoundTripper that has been +// setup to authenticate with the remote registry "reg", in the capacity +// laid out by the specified scopes. +// +// Deprecated: Use NewWithContext. 
+func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) { + return NewWithContext(context.Background(), reg, auth, t, scopes) +} + +// NewWithContext returns a new RoundTripper based on the provided RoundTripper that has been +// set up to authenticate with the remote registry "reg", in the capacity +// laid out by the specified scopes. +// In case the RoundTripper is already of the type Wrapper it assumes +// authentication was already done prior to this call, so it just returns +// the provided RoundTripper without further action +func NewWithContext(ctx context.Context, reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) { + // When the transport provided is of the type Wrapper this function assumes that the caller already + // executed the necessary login and check. + switch t.(type) { + case *Wrapper: + return t, nil + } + // The handshake: + // 1. Use "t" to ping() the registry for the authentication challenge. + // + // 2a. If we get back a 200, then simply use "t". + // + // 2b. If we get back a 401 with a Basic challenge, then use a transport + // that just attachs auth each roundtrip. + // + // 2c. If we get back a 401 with a Bearer challenge, then use a transport + // that attaches a bearer token to each request, and refreshes is on 401s. + // Perform an initial refresh to seed the bearer token. + + // First we ping the registry to determine the parameters of the authentication handshake + // (if one is even necessary). + pr, err := ping(ctx, reg, t) + if err != nil { + return nil, err + } + + // Wrap t with a useragent transport unless we already have one. + if _, ok := t.(*userAgentTransport); !ok { + t = NewUserAgent(t, "") + } + + // Wrap t in a transport that selects the appropriate scheme based on the ping response. + t = &schemeTransport{ + scheme: pr.scheme, + registry: reg, + inner: t, + } + + switch pr.challenge.Canonical() { + case anonymous, basic: + return &Wrapper{&basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}}, nil + case bearer: + // We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth. + realm, ok := pr.parameters["realm"] + if !ok { + return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters) + } + service := pr.parameters["service"] + bt := &bearerTransport{ + inner: t, + basic: auth, + realm: realm, + registry: reg, + service: service, + scopes: scopes, + scheme: pr.scheme, + } + if err := bt.refresh(ctx); err != nil { + return nil, err + } + return &Wrapper{bt}, nil + default: + return nil, fmt.Errorf("unrecognized challenge: %s", pr.challenge) + } +} + +// Wrapper results in *not* wrapping supplied transport with additional logic such as retries, useragent and debug logging +// Consumers are opt-ing into providing their own transport without any additional wrapping. 
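Putting `NewWithContext` to work end to end: the sketch below builds an authenticated transport for anonymous pulls and fetches a manifest with a plain `http.Client`. The image reference is only an example, and a real caller would usually reach for `pkg/v1/remote` instead of hand-rolling the request:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
)

func main() {
	ref, err := name.ParseReference("gcr.io/google-containers/pause:latest")
	if err != nil {
		panic(err)
	}
	repo := ref.Context()

	// Pull-only scope for this repository, e.g. "repository:google-containers/pause:pull".
	scopes := []string{repo.Scope(transport.PullScope)}

	rt, err := transport.NewWithContext(context.Background(), repo.Registry, authn.Anonymous, http.DefaultTransport, scopes)
	if err != nil {
		panic(err) // ping or the initial token exchange failed
	}

	client := &http.Client{Transport: rt}
	url := fmt.Sprintf("https://%s/v2/%s/manifests/%s", repo.RegistryStr(), repo.RepositoryStr(), ref.Identifier())
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Accept", "application/vnd.docker.distribution.manifest.v2+json")

	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	if err := transport.CheckError(resp, http.StatusOK); err != nil {
		panic(err)
	}
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("fetched %d-byte manifest\n", len(b))
}
```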
+type Wrapper struct { + inner http.RoundTripper +} + +// RoundTrip delegates to the inner RoundTripper +func (w *Wrapper) RoundTrip(in *http.Request) (*http.Response, error) { + return w.inner.RoundTrip(in) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go new file mode 100644 index 00000000..74a9e71b --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go @@ -0,0 +1,94 @@ +// Copyright 2019 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "net/http" + "runtime/debug" +) + +var ( + // Version can be set via: + // -ldflags="-X 'github.com/google/go-containerregistry/pkg/v1/remote/transport.Version=$TAG'" + Version string + + ggcrVersion = defaultUserAgent +) + +const ( + defaultUserAgent = "go-containerregistry" + moduleName = "github.com/google/go-containerregistry" +) + +type userAgentTransport struct { + inner http.RoundTripper + ua string +} + +func init() { + if v := version(); v != "" { + ggcrVersion = fmt.Sprintf("%s/%s", defaultUserAgent, v) + } +} + +func version() string { + if Version != "" { + // Version was set via ldflags, just return it. + return Version + } + + info, ok := debug.ReadBuildInfo() + if !ok { + return "" + } + + // Happens for crane and gcrane. + if info.Main.Path == moduleName { + return info.Main.Version + } + + // Anything else. + for _, dep := range info.Deps { + if dep.Path == moduleName { + return dep.Version + } + } + + return "" +} + +// NewUserAgent returns an http.Roundtripper that sets the user agent to +// The provided string plus additional go-containerregistry information, +// e.g. if provided "crane/v0.1.4" and this modules was built at v0.1.4: +// +// User-Agent: crane/v0.1.4 go-containerregistry/v0.1.4 +func NewUserAgent(inner http.RoundTripper, ua string) http.RoundTripper { + if ua == "" { + ua = ggcrVersion + } else { + ua = fmt.Sprintf("%s %s", ua, ggcrVersion) + } + return &userAgentTransport{ + inner: inner, + ua: ua, + } +} + +// RoundTrip implements http.RoundTripper +func (ut *userAgentTransport) RoundTrip(in *http.Request) (*http.Response, error) { + in.Header.Set("User-Agent", ut.ua) + return ut.inner.RoundTrip(in) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go new file mode 100644 index 00000000..99e9fe83 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go @@ -0,0 +1,876 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + + "github.com/google/go-containerregistry/internal/redact" + "github.com/google/go-containerregistry/internal/retry" + "github.com/google/go-containerregistry/pkg/logs" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" + "golang.org/x/sync/errgroup" +) + +// Taggable is an interface that enables a manifest PUT (e.g. for tagging). +type Taggable interface { + RawManifest() ([]byte, error) +} + +// Write pushes the provided img to the specified image reference. +func Write(ref name.Reference, img v1.Image, options ...Option) (rerr error) { + o, err := makeOptions(ref.Context(), options...) + if err != nil { + return err + } + + var p *progress + if o.updates != nil { + p = &progress{updates: o.updates} + p.lastUpdate = &v1.Update{} + p.lastUpdate.Total, err = countImage(img, o.allowNondistributableArtifacts) + if err != nil { + return err + } + defer close(o.updates) + defer func() { _ = p.err(rerr) }() + } + return writeImage(o.context, ref, img, o, p) +} + +func writeImage(ctx context.Context, ref name.Reference, img v1.Image, o *options, progress *progress) error { + ls, err := img.Layers() + if err != nil { + return err + } + scopes := scopesForUploadingImage(ref.Context(), ls) + tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) + if err != nil { + return err + } + w := writer{ + repo: ref.Context(), + client: &http.Client{Transport: tr}, + progress: progress, + backoff: o.retryBackoff, + predicate: o.retryPredicate, + } + + // Upload individual blobs and collect any errors. + blobChan := make(chan v1.Layer, 2*o.jobs) + g, gctx := errgroup.WithContext(ctx) + for i := 0; i < o.jobs; i++ { + // Start N workers consuming blobs to upload. + g.Go(func() error { + for b := range blobChan { + if err := w.uploadOne(gctx, b); err != nil { + return err + } + } + return nil + }) + } + + // Upload individual layers in goroutines and collect any errors. + // If we can dedupe by the layer digest, try to do so. If we can't determine + // the digest for whatever reason, we can't dedupe and might re-upload. + g.Go(func() error { + defer close(blobChan) + uploaded := map[v1.Hash]bool{} + for _, l := range ls { + l := l + + // Handle foreign layers. + mt, err := l.MediaType() + if err != nil { + return err + } + if !mt.IsDistributable() && !o.allowNondistributableArtifacts { + continue + } + + // Streaming layers calculate their digests while uploading them. Assume + // an error here indicates we need to upload the layer. + h, err := l.Digest() + if err == nil { + // If we can determine the layer's digest ahead of + // time, use it to dedupe uploads. + if uploaded[h] { + continue // Already uploading. 
+ } + uploaded[h] = true + } + select { + case blobChan <- l: + case <-gctx.Done(): + return gctx.Err() + } + } + return nil + }) + + if l, err := partial.ConfigLayer(img); err != nil { + // We can't read the ConfigLayer, possibly because of streaming layers, + // since the layer DiffIDs haven't been calculated yet. Attempt to wait + // for the other layers to be uploaded, then try the config again. + if err := g.Wait(); err != nil { + return err + } + + // Now that all the layers are uploaded, try to upload the config file blob. + l, err := partial.ConfigLayer(img) + if err != nil { + return err + } + if err := w.uploadOne(ctx, l); err != nil { + return err + } + } else { + // We *can* read the ConfigLayer, so upload it concurrently with the layers. + g.Go(func() error { + return w.uploadOne(gctx, l) + }) + + // Wait for the layers + config. + if err := g.Wait(); err != nil { + return err + } + } + + // With all of the constituent elements uploaded, upload the manifest + // to commit the image. + return w.commitManifest(ctx, img, ref) +} + +// writer writes the elements of an image to a remote image reference. +type writer struct { + repo name.Repository + client *http.Client + + progress *progress + backoff Backoff + predicate retry.Predicate +} + +// url returns a url.Url for the specified path in the context of this remote image reference. +func (w *writer) url(path string) url.URL { + return url.URL{ + Scheme: w.repo.Registry.Scheme(), + Host: w.repo.RegistryStr(), + Path: path, + } +} + +// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence. +func (w *writer) nextLocation(resp *http.Response) (string, error) { + loc := resp.Header.Get("Location") + if len(loc) == 0 { + return "", errors.New("missing Location header") + } + u, err := url.Parse(loc) + if err != nil { + return "", err + } + + // If the location header returned is just a url path, then fully qualify it. + // We cannot simply call w.url, since there might be an embedded query string. + return resp.Request.URL.ResolveReference(u).String(), nil +} + +// checkExistingBlob checks if a blob exists already in the repository by making a +// HEAD request to the blob store API. GCR performs an existence check on the +// initiation if "mount" is specified, even if no "from" sources are specified. +// However, this is not broadly applicable to all registries, e.g. ECR. +func (w *writer) checkExistingBlob(ctx context.Context, h v1.Hash) (bool, error) { + u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.repo.RepositoryStr(), h.String())) + + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return false, err + } + + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return false, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + return false, err + } + + return resp.StatusCode == http.StatusOK, nil +} + +// checkExistingManifest checks if a manifest exists already in the repository +// by making a HEAD request to the manifest API. 
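The existence probe in `checkExistingBlob` above is a plain HEAD against the blob endpoint. A standalone, unauthenticated sketch of that round trip; the registry name is hypothetical, and real callers go through the authenticated client built by the transport package:

```go
package main

import (
	"fmt"
	"net/http"
)

// blobExists mirrors the HEAD-based probe: 200 means present, 404 means absent.
func blobExists(registry, repo, digest string) (bool, error) {
	url := fmt.Sprintf("https://%s/v2/%s/blobs/%s", registry, repo, digest)
	resp, err := http.Head(url)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusOK:
		return true, nil
	case http.StatusNotFound:
		return false, nil
	default:
		return false, fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
}

func main() {
	exists, err := blobExists("registry.example.com", "team/app",
		"sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824")
	fmt.Println(exists, err)
}
```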
+func (w *writer) checkExistingManifest(ctx context.Context, h v1.Hash, mt types.MediaType) (bool, error) { + u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), h.String())) + + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return false, err + } + req.Header.Set("Accept", string(mt)) + + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return false, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + return false, err + } + + return resp.StatusCode == http.StatusOK, nil +} + +// initiateUpload initiates the blob upload, which starts with a POST that can +// optionally include the hash of the layer and a list of repositories from +// which that layer might be read. On failure, an error is returned. +// On success, the layer was either mounted (nothing more to do) or a blob +// upload was initiated and the body of that blob should be sent to the returned +// location. +func (w *writer) initiateUpload(ctx context.Context, from, mount, origin string) (location string, mounted bool, err error) { + u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.repo.RepositoryStr())) + uv := url.Values{} + if mount != "" && from != "" { + // Quay will fail if we specify a "mount" without a "from". + uv.Set("mount", mount) + uv.Set("from", from) + if origin != "" { + uv.Set("origin", origin) + } + } + u.RawQuery = uv.Encode() + + // Make the request to initiate the blob upload. + req, err := http.NewRequest(http.MethodPost, u.String(), nil) + if err != nil { + return "", false, err + } + req.Header.Set("Content-Type", "application/json") + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return "", false, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { + if origin != "" && origin != w.repo.RegistryStr() { + // https://github.com/google/go-containerregistry/issues/1404 + logs.Warn.Printf("retrying without mount: %v", err) + return w.initiateUpload(ctx, "", "", "") + } + return "", false, err + } + + // Check the response code to determine the result. + switch resp.StatusCode { + case http.StatusCreated: + // We're done, we were able to fast-path. + return "", true, nil + case http.StatusAccepted: + // Proceed to PATCH, upload has begun. + loc, err := w.nextLocation(resp) + return loc, false, err + default: + panic("Unreachable: initiateUpload") + } +} + +// streamBlob streams the contents of the blob to the specified location. +// On failure, this will return an error. On success, this will return the location +// header indicating how to commit the streamed blob. 
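Between them, `initiateUpload`, `streamBlob`, and `commitBlob` implement the registry's three-step monolithic upload: POST to initiate, PATCH the bytes, PUT to commit. A condensed, unauthenticated sketch of that protocol against a hypothetical registry; note the real code also resolves relative `Location` headers and merges query parameters, which this omits:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func upload(registry, repo string, blob []byte, digest string) error {
	// 1. POST to initiate; 202 Accepted returns a Location to stream to.
	initURL := fmt.Sprintf("https://%s/v2/%s/blobs/uploads/", registry, repo)
	resp, err := http.Post(initURL, "application/json", nil)
	if err != nil {
		return err
	}
	resp.Body.Close()
	loc := resp.Header.Get("Location") // assumed absolute in this sketch

	// 2. PATCH the blob bytes to the returned location.
	req, err := http.NewRequest(http.MethodPatch, loc, bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	loc = resp.Header.Get("Location")

	// 3. PUT with ?digest= to commit (assumes loc carries no query string).
	req, err = http.NewRequest(http.MethodPut, loc+"?digest="+digest, nil)
	if err != nil {
		return err
	}
	resp, err = http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("commit failed: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// sha256 of "hello".
	fmt.Println(upload("registry.example.com", "team/app", []byte("hello"),
		"sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"))
}
```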
+func (w *writer) streamBlob(ctx context.Context, layer v1.Layer, streamLocation string) (commitLocation string, rerr error) { + reset := func() {} + defer func() { + if rerr != nil { + reset() + } + }() + blob, err := layer.Compressed() + if err != nil { + return "", err + } + + getBody := layer.Compressed + if w.progress != nil { + var count int64 + blob = &progressReader{rc: blob, progress: w.progress, count: &count} + getBody = func() (io.ReadCloser, error) { + blob, err := layer.Compressed() + if err != nil { + return nil, err + } + return &progressReader{rc: blob, progress: w.progress, count: &count}, nil + } + reset = func() { + w.progress.complete(-count) + } + } + + req, err := http.NewRequest(http.MethodPatch, streamLocation, blob) + if err != nil { + return "", err + } + if _, ok := layer.(*stream.Layer); !ok { + // We can't retry streaming layers. + req.GetBody = getBody + } + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { + return "", err + } + + // The blob has been uploaded, return the location header indicating + // how to commit this layer. + return w.nextLocation(resp) +} + +// commitBlob commits this blob by sending a PUT to the location returned from +// streaming the blob. +func (w *writer) commitBlob(ctx context.Context, location, digest string) error { + u, err := url.Parse(location) + if err != nil { + return err + } + v := u.Query() + v.Set("digest", digest) + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPut, u.String(), nil) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + defer resp.Body.Close() + + return transport.CheckError(resp, http.StatusCreated) +} + +// incrProgress increments and sends a progress update, if WithProgress is used. +func (w *writer) incrProgress(written int64) { + if w.progress == nil { + return + } + w.progress.complete(written) +} + +// uploadOne performs a complete upload of a single layer. +func (w *writer) uploadOne(ctx context.Context, l v1.Layer) error { + tryUpload := func() error { + ctx := retry.Never(ctx) + var from, mount, origin string + if h, err := l.Digest(); err == nil { + // If we know the digest, this isn't a streaming layer. Do an existence + // check so we can skip uploading the layer if possible. + existing, err := w.checkExistingBlob(ctx, h) + if err != nil { + return err + } + if existing { + size, err := l.Size() + if err != nil { + return err + } + w.incrProgress(size) + logs.Progress.Printf("existing blob: %v", h) + return nil + } + + mount = h.String() + } + if ml, ok := l.(*MountableLayer); ok { + from = ml.Reference.Context().RepositoryStr() + origin = ml.Reference.Context().RegistryStr() + } + + location, mounted, err := w.initiateUpload(ctx, from, mount, origin) + if err != nil { + return err + } else if mounted { + size, err := l.Size() + if err != nil { + return err + } + w.incrProgress(size) + h, err := l.Digest() + if err != nil { + return err + } + logs.Progress.Printf("mounted blob: %s", h.String()) + return nil + } + + // Only log layers with +json or +yaml. We can let through other stuff if it becomes popular. + // TODO(opencontainers/image-spec#791): Would be great to have an actual parser. 
+ mt, err := l.MediaType() + if err != nil { + return err + } + smt := string(mt) + if !(strings.HasSuffix(smt, "+json") || strings.HasSuffix(smt, "+yaml")) { + ctx = redact.NewContext(ctx, "omitting binary blobs from logs") + } + + location, err = w.streamBlob(ctx, l, location) + if err != nil { + return err + } + + h, err := l.Digest() + if err != nil { + return err + } + digest := h.String() + + if err := w.commitBlob(ctx, location, digest); err != nil { + return err + } + logs.Progress.Printf("pushed blob: %s", digest) + return nil + } + + return retry.Retry(tryUpload, w.predicate, w.backoff) +} + +type withLayer interface { + Layer(v1.Hash) (v1.Layer, error) +} + +func (w *writer) writeIndex(ctx context.Context, ref name.Reference, ii v1.ImageIndex, options ...Option) error { + index, err := ii.IndexManifest() + if err != nil { + return err + } + + o, err := makeOptions(ref.Context(), options...) + if err != nil { + return err + } + + // TODO(#803): Pipe through remote.WithJobs and upload these in parallel. + for _, desc := range index.Manifests { + ref := ref.Context().Digest(desc.Digest.String()) + exists, err := w.checkExistingManifest(ctx, desc.Digest, desc.MediaType) + if err != nil { + return err + } + if exists { + logs.Progress.Print("existing manifest: ", desc.Digest) + continue + } + + switch desc.MediaType { + case types.OCIImageIndex, types.DockerManifestList: + ii, err := ii.ImageIndex(desc.Digest) + if err != nil { + return err + } + if err := w.writeIndex(ctx, ref, ii, options...); err != nil { + return err + } + case types.OCIManifestSchema1, types.DockerManifestSchema2: + img, err := ii.Image(desc.Digest) + if err != nil { + return err + } + if err := writeImage(ctx, ref, img, o, w.progress); err != nil { + return err + } + default: + // Workaround for #819. + if wl, ok := ii.(withLayer); ok { + layer, err := wl.Layer(desc.Digest) + if err != nil { + return err + } + if err := w.uploadOne(ctx, layer); err != nil { + return err + } + } + } + } + + // With all of the constituent elements uploaded, upload the manifest + // to commit the image. + return w.commitManifest(ctx, ii, ref) +} + +type withMediaType interface { + MediaType() (types.MediaType, error) +} + +// This is really silly, but go interfaces don't let me satisfy remote.Taggable +// with remote.Descriptor because of name collisions between method names and +// struct fields. +// +// Use reflection to either pull the v1.Descriptor out of remote.Descriptor or +// create a descriptor based on the RawManifest and (optionally) MediaType. +func unpackTaggable(t Taggable) ([]byte, *v1.Descriptor, error) { + if d, ok := t.(*Descriptor); ok { + return d.Manifest, &d.Descriptor, nil + } + b, err := t.RawManifest() + if err != nil { + return nil, nil, err + } + + // A reasonable default if Taggable doesn't implement MediaType. + mt := types.DockerManifestSchema2 + + if wmt, ok := t.(withMediaType); ok { + m, err := wmt.MediaType() + if err != nil { + return nil, nil, err + } + mt = m + } + + h, sz, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, nil, err + } + + return b, &v1.Descriptor{ + MediaType: mt, + Size: sz, + Digest: h, + }, nil +} + +// commitManifest does a PUT of the image's manifest. 
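The descriptor that `unpackTaggable` synthesizes when a `Taggable` carries no descriptor of its own boils down to hashing the raw manifest bytes. A small sketch with a stand-in manifest body:

```go
package main

import (
	"bytes"
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/types"
)

func main() {
	raw := []byte(`{"schemaVersion":2}`) // stand-in for RawManifest() output

	// v1.SHA256 returns the digest and byte count in one pass.
	h, size, err := v1.SHA256(bytes.NewReader(raw))
	if err != nil {
		panic(err)
	}

	desc := v1.Descriptor{
		MediaType: types.DockerManifestSchema2, // the default when MediaType() is absent
		Size:      size,
		Digest:    h,
	}
	fmt.Printf("%s %d %s\n", desc.MediaType, desc.Size, desc.Digest)
}
```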
+func (w *writer) commitManifest(ctx context.Context, t Taggable, ref name.Reference) error { + tryUpload := func() error { + ctx := retry.Never(ctx) + raw, desc, err := unpackTaggable(t) + if err != nil { + return err + } + + u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.repo.RepositoryStr(), ref.Identifier())) + + // Make the request to PUT the serialized manifest + req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(raw)) + if err != nil { + return err + } + req.Header.Set("Content-Type", string(desc.MediaType)) + + resp, err := w.client.Do(req.WithContext(ctx)) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil { + return err + } + + // The image was successfully pushed! + logs.Progress.Printf("%v: digest: %v size: %d", ref, desc.Digest, desc.Size) + w.incrProgress(int64(len(raw))) + return nil + } + + return retry.Retry(tryUpload, w.predicate, w.backoff) +} + +func scopesForUploadingImage(repo name.Repository, layers []v1.Layer) []string { + // use a map as set to remove duplicates scope strings + scopeSet := map[string]struct{}{} + + for _, l := range layers { + if ml, ok := l.(*MountableLayer); ok { + // we will add push scope for ref.Context() after the loop. + // for now we ask pull scope for references of the same registry + if ml.Reference.Context().String() != repo.String() && ml.Reference.Context().Registry.String() == repo.Registry.String() { + scopeSet[ml.Reference.Scope(transport.PullScope)] = struct{}{} + } + } + } + + scopes := make([]string, 0) + // Push scope should be the first element because a few registries just look at the first scope to determine access. + scopes = append(scopes, repo.Scope(transport.PushScope)) + + for scope := range scopeSet { + scopes = append(scopes, scope) + } + + return scopes +} + +// WriteIndex pushes the provided ImageIndex to the specified image reference. +// WriteIndex will attempt to push all of the referenced manifests before +// attempting to push the ImageIndex, to retain referential integrity. +func WriteIndex(ref name.Reference, ii v1.ImageIndex, options ...Option) (rerr error) { + o, err := makeOptions(ref.Context(), options...) + if err != nil { + return err + } + + scopes := []string{ref.Scope(transport.PushScope)} + tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes) + if err != nil { + return err + } + w := writer{ + repo: ref.Context(), + client: &http.Client{Transport: tr}, + backoff: o.retryBackoff, + predicate: o.retryPredicate, + } + + if o.updates != nil { + w.progress = &progress{updates: o.updates} + w.progress.lastUpdate = &v1.Update{} + + defer close(o.updates) + defer func() { w.progress.err(rerr) }() + + w.progress.lastUpdate.Total, err = countIndex(ii, o.allowNondistributableArtifacts) + if err != nil { + return err + } + } + + return w.writeIndex(o.context, ref, ii, options...) +} + +// countImage counts the total size of all layers + config blob + manifest for +// an image. It de-dupes duplicate layers. +func countImage(img v1.Image, allowNondistributableArtifacts bool) (int64, error) { + var total int64 + ls, err := img.Layers() + if err != nil { + return 0, err + } + seen := map[v1.Hash]bool{} + for _, l := range ls { + // Handle foreign layers. 
+		mt, err := l.MediaType()
+		if err != nil {
+			return 0, err
+		}
+		if !mt.IsDistributable() && !allowNondistributableArtifacts {
+			continue
+		}
+
+		// TODO: support streaming layers which update the total count as they write.
+		if _, ok := l.(*stream.Layer); ok {
+			return 0, errors.New("cannot use stream.Layer and WithProgress")
+		}
+
+		// Dedupe layers.
+		d, err := l.Digest()
+		if err != nil {
+			return 0, err
+		}
+		if seen[d] {
+			continue
+		}
+		seen[d] = true
+
+		size, err := l.Size()
+		if err != nil {
+			return 0, err
+		}
+		total += size
+	}
+	b, err := img.RawConfigFile()
+	if err != nil {
+		return 0, err
+	}
+	total += int64(len(b))
+	size, err := img.Size()
+	if err != nil {
+		return 0, err
+	}
+	total += size
+	return total, nil
+}
+
+// countIndex counts the total size of all images + sub-indexes for an index.
+// It does not attempt to de-dupe duplicate images, etc.
+func countIndex(idx v1.ImageIndex, allowNondistributableArtifacts bool) (int64, error) {
+	var total int64
+	mf, err := idx.IndexManifest()
+	if err != nil {
+		return 0, err
+	}
+
+	for _, desc := range mf.Manifests {
+		switch desc.MediaType {
+		case types.OCIImageIndex, types.DockerManifestList:
+			sidx, err := idx.ImageIndex(desc.Digest)
+			if err != nil {
+				return 0, err
+			}
+			size, err := countIndex(sidx, allowNondistributableArtifacts)
+			if err != nil {
+				return 0, err
+			}
+			total += size
+		case types.OCIManifestSchema1, types.DockerManifestSchema2:
+			simg, err := idx.Image(desc.Digest)
+			if err != nil {
+				return 0, err
+			}
+			size, err := countImage(simg, allowNondistributableArtifacts)
+			if err != nil {
+				return 0, err
+			}
+			total += size
+		default:
+			// Workaround for #819.
+			if wl, ok := idx.(withLayer); ok {
+				layer, err := wl.Layer(desc.Digest)
+				if err != nil {
+					return 0, err
+				}
+				size, err := layer.Size()
+				if err != nil {
+					return 0, err
+				}
+				total += size
+			}
+		}
+	}
+
+	size, err := idx.Size()
+	if err != nil {
+		return 0, err
+	}
+	total += size
+	return total, nil
+}
+
+// WriteLayer uploads the provided Layer to the specified repo.
+func WriteLayer(repo name.Repository, layer v1.Layer, options ...Option) (rerr error) {
+	o, err := makeOptions(repo, options...)
+	if err != nil {
+		return err
+	}
+	scopes := scopesForUploadingImage(repo, []v1.Layer{layer})
+	tr, err := transport.NewWithContext(o.context, repo.Registry, o.auth, o.transport, scopes)
+	if err != nil {
+		return err
+	}
+	w := writer{
+		repo:      repo,
+		client:    &http.Client{Transport: tr},
+		backoff:   o.retryBackoff,
+		predicate: o.retryPredicate,
+	}
+
+	if o.updates != nil {
+		w.progress = &progress{updates: o.updates}
+		w.progress.lastUpdate = &v1.Update{}
+
+		defer close(o.updates)
+		defer func() { w.progress.err(rerr) }()
+
+		// TODO: support streaming layers which update the total count as they write.
+		if _, ok := layer.(*stream.Layer); ok {
+			return errors.New("cannot use stream.Layer and WithProgress")
+		}
+		size, err := layer.Size()
+		if err != nil {
+			return err
+		}
+		w.progress.total(size)
+	}
+	return w.uploadOne(o.context, layer)
+}
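The totals computed by `countImage` and `countIndex` are what feed the `v1.Update` values a caller receives when pushing with progress reporting. A hedged usage sketch with a randomly generated image and a hypothetical local registry tag:

```go
package main

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/random"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

func main() {
	img, err := random.Image(1024, 3) // three random 1KiB layers
	if err != nil {
		panic(err)
	}
	tag, err := name.NewTag("localhost:5000/demo:latest")
	if err != nil {
		panic(err)
	}

	updates := make(chan v1.Update, 16)
	done := make(chan struct{})
	go func() {
		defer close(done)
		for u := range updates {
			fmt.Printf("\r%d/%d bytes", u.Complete, u.Total)
		}
		fmt.Println()
	}()

	// Write closes the updates channel when it returns.
	if err := remote.Write(tag, img, remote.WithProgress(updates)); err != nil {
		panic(err)
	}
	<-done
}
```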
+// Tag adds a tag to the given Taggable via PUT /v2/.../manifests/<tag>
+//
+// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and
+// remote.Descriptor.
+//
+// If t implements MediaType, we will use that for the Content-Type, otherwise
+// we will default to types.DockerManifestSchema2.
+//
+// Tag does not attempt to write anything other than the manifest, so callers
+// should ensure that all blobs or manifests that are referenced by t exist
+// in the target registry.
+func Tag(tag name.Tag, t Taggable, options ...Option) error {
+	return Put(tag, t, options...)
+}
+
+// Put adds a manifest from the given Taggable via PUT /v1/.../manifest/<ref>
+//
+// Notable implementations of Taggable are v1.Image, v1.ImageIndex, and
+// remote.Descriptor.
+//
+// If t implements MediaType, we will use that for the Content-Type, otherwise
+// we will default to types.DockerManifestSchema2.
+//
+// Put does not attempt to write anything other than the manifest, so callers
+// should ensure that all blobs or manifests that are referenced by t exist
+// in the target registry.
+func Put(ref name.Reference, t Taggable, options ...Option) error {
+	o, err := makeOptions(ref.Context(), options...)
+	if err != nil {
+		return err
+	}
+	scopes := []string{ref.Scope(transport.PushScope)}
+
+	// TODO: This *always* does a token exchange. For some registries,
+	// that's pretty slow. Some ideas;
+	// * Tag could take a list of tags.
+	// * Allow callers to pass in a transport.Transport, typecheck
+	//   it to allow them to reuse the transport across multiple calls.
+	// * WithTag option to do multiple manifest PUTs in commitManifest.
+	tr, err := transport.NewWithContext(o.context, ref.Context().Registry, o.auth, o.transport, scopes)
+	if err != nil {
+		return err
+	}
+	w := writer{
+		repo:      ref.Context(),
+		client:    &http.Client{Transport: tr},
+		backoff:   o.retryBackoff,
+		predicate: o.retryPredicate,
+	}
+
+	return w.commitManifest(o.context, t, ref)
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md
new file mode 100644
index 00000000..da0dda48
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md
@@ -0,0 +1,68 @@
+# `stream`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/stream)
+
+The `stream` package contains an implementation of
+[`v1.Layer`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1#Layer)
+that supports _streaming_ access, i.e. the layer contents are read once and not
+buffered.
+
+## Usage
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+	"github.com/google/go-containerregistry/pkg/v1/stream"
+)
+
+// upload the contents of stdin as a layer to a local registry
+func main() {
+	repo, err := name.NewRepository("localhost:5000/stream")
+	if err != nil {
+		panic(err)
+	}
+
+	layer := stream.NewLayer(os.Stdin)
+
+	if err := remote.WriteLayer(repo, layer); err != nil {
+		panic(err)
+	}
+}
+```
+
+## Structure
+
+This implements the layer portion of an [image
+upload](/pkg/v1/remote#anatomy-of-an-image-upload). We launch a goroutine that
+is responsible for hashing the uncompressed contents to compute the `DiffID`,
+gzipping them to produce the `Compressed` contents, and hashing/counting the
+bytes to produce the `Digest`/`Size`. This goroutine writes to an
+`io.PipeWriter`, which blocks until `Compressed` reads the gzipped contents from
+the corresponding `io.PipeReader`.
+
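A minimal sketch of the pipe described in this paragraph, independent of the package: gzip through an `io.Pipe` while hashing both the uncompressed and compressed streams in a single pass, so nothing is buffered. The consumer side stands in for the registry PATCH body; until it reads, the producing goroutine blocks on the pipe.

```go
package main

import (
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	src := strings.NewReader("layer contents")

	h := sha256.New()  // uncompressed digest (DiffID)
	zh := sha256.New() // compressed digest (Digest)

	pr, pw := io.Pipe()
	go func() {
		// gzip output goes to the pipe and the compressed hasher.
		zw := gzip.NewWriter(io.MultiWriter(pw, zh))
		// Tee the source so the uncompressed hasher sees every byte once.
		if _, err := io.Copy(zw, io.TeeReader(src, h)); err != nil {
			pw.CloseWithError(err)
			return
		}
		zw.Close()
		pw.Close()
	}()

	n, err := io.Copy(io.Discard, pr) // consumer, e.g. the blob upload body
	if err != nil {
		panic(err)
	}
	fmt.Printf("diffid=%x digest=%x size=%d\n", h.Sum(nil), zh.Sum(nil), n)
}
```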
+ +## Caveats + +This assumes that you have an uncompressed layer (i.e. a tarball) and would like +to compress it. Calling `Uncompressed` is always an error. Likewise, other +methods are invalid until the contents of `Compressed` have been completely +consumed and `Close`d. + +Using a `stream.Layer` will likely not work without careful consideration. For +example, in the `mutate` package, we defer computing the manifest and config +file until they are actually called. This allows you to `mutate.Append` a +streaming layer to an image without accidentally consuming it. Similarly, in +`remote.Write`, if calling `Digest` on a layer fails, we attempt to upload the +layer anyway, understanding that we may be dealing with a `stream.Layer` whose +contents need to be uploaded before we can upload the config file. + +Given the [structure](#structure) of how this is implemented, forgetting to +`Close` a `stream.Layer` will leak a goroutine. diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go new file mode 100644 index 00000000..9dd3e5ff --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go @@ -0,0 +1,273 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stream implements a single-pass streaming v1.Layer. +package stream + +import ( + "bufio" + "compress/gzip" + "crypto/sha256" + "encoding/hex" + "errors" + "hash" + "io" + "os" + "sync" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +var ( + // ErrNotComputed is returned when the requested value is not yet + // computed because the stream has not been consumed yet. + ErrNotComputed = errors.New("value not computed until stream is consumed") + + // ErrConsumed is returned by Compressed when the underlying stream has + // already been consumed and closed. + ErrConsumed = errors.New("stream was already consumed") +) + +// Layer is a streaming implementation of v1.Layer. +type Layer struct { + blob io.ReadCloser + consumed bool + compression int + + mu sync.Mutex + digest, diffID *v1.Hash + size int64 + mediaType types.MediaType +} + +var _ v1.Layer = (*Layer)(nil) + +// LayerOption applies options to layer +type LayerOption func(*Layer) + +// WithCompressionLevel sets the gzip compression. See `gzip.NewWriterLevel` for possible values. +func WithCompressionLevel(level int) LayerOption { + return func(l *Layer) { + l.compression = level + } +} + +// WithMediaType is a functional option for overriding the layer's media type. +func WithMediaType(mt types.MediaType) LayerOption { + return func(l *Layer) { + l.mediaType = mt + } +} + +// NewLayer creates a Layer from an io.ReadCloser. 
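+//
+// A sketch of typical use (the file path is hypothetical):
+//
+//	f, err := os.Open("/tmp/layer.tar")
+//	if err != nil { /* handle */ }
+//	layer := stream.NewLayer(f, stream.WithCompressionLevel(gzip.BestCompression))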
+func NewLayer(rc io.ReadCloser, opts ...LayerOption) *Layer { + layer := &Layer{ + blob: rc, + compression: gzip.BestSpeed, + // We use DockerLayer for now as uncompressed layers + // are unimplemented + mediaType: types.DockerLayer, + } + + for _, opt := range opts { + opt(layer) + } + + return layer +} + +// Digest implements v1.Layer. +func (l *Layer) Digest() (v1.Hash, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.digest == nil { + return v1.Hash{}, ErrNotComputed + } + return *l.digest, nil +} + +// DiffID implements v1.Layer. +func (l *Layer) DiffID() (v1.Hash, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.diffID == nil { + return v1.Hash{}, ErrNotComputed + } + return *l.diffID, nil +} + +// Size implements v1.Layer. +func (l *Layer) Size() (int64, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.size == 0 { + return 0, ErrNotComputed + } + return l.size, nil +} + +// MediaType implements v1.Layer +func (l *Layer) MediaType() (types.MediaType, error) { + return l.mediaType, nil +} + +// Uncompressed implements v1.Layer. +func (l *Layer) Uncompressed() (io.ReadCloser, error) { + return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented") +} + +// Compressed implements v1.Layer. +func (l *Layer) Compressed() (io.ReadCloser, error) { + if l.consumed { + return nil, ErrConsumed + } + return newCompressedReader(l) +} + +// finalize sets the layer to consumed and computes all hash and size values. +func (l *Layer) finalize(uncompressed, compressed hash.Hash, size int64) error { + l.mu.Lock() + defer l.mu.Unlock() + + diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(uncompressed.Sum(nil))) + if err != nil { + return err + } + l.diffID = &diffID + + digest, err := v1.NewHash("sha256:" + hex.EncodeToString(compressed.Sum(nil))) + if err != nil { + return err + } + l.digest = &digest + + l.size = size + l.consumed = true + return nil +} + +type compressedReader struct { + pr io.Reader + closer func() error +} + +func newCompressedReader(l *Layer) (*compressedReader, error) { + // Collect digests of compressed and uncompressed stream and size of + // compressed stream. + h := sha256.New() + zh := sha256.New() + count := &countWriter{} + + // gzip.Writer writes to the output stream via pipe, a hasher to + // capture compressed digest, and a countWriter to capture compressed + // size. + pr, pw := io.Pipe() + + // Write compressed bytes to be read by the pipe.Reader, hashed by zh, and counted by count. + mw := io.MultiWriter(pw, zh, count) + + // Buffer the output of the gzip writer so we don't have to wait on pr to keep writing. + // 64K ought to be small enough for anybody. + bw := bufio.NewWriterSize(mw, 2<<16) + zw, err := gzip.NewWriterLevel(bw, l.compression) + if err != nil { + return nil, err + } + + doneDigesting := make(chan struct{}) + + cr := &compressedReader{ + pr: pr, + closer: func() error { + // Immediately close pw without error. There are three ways to get + // here. + // + // 1. There was a copy error due from the underlying reader, in which + // case the error will not be overwritten. + // 2. Copying from the underlying reader completed successfully. + // 3. Close has been called before the underlying reader has been + // fully consumed. In this case pw must be closed in order to + // keep the flush of bw from blocking indefinitely. + // + // NOTE: pw.Close never returns an error. The signature is only to + // implement io.Closer. + _ = pw.Close() + + // Close the inner ReadCloser. 
+			//
+			// NOTE: net/http will call close on success, so if we've already
+			// closed the inner rc, it's not an error.
+			if err := l.blob.Close(); err != nil && !errors.Is(err, os.ErrClosed) {
+				return err
+			}
+
+			// Finalize layer with its digest and size values.
+			<-doneDigesting
+			return l.finalize(h, zh, count.n)
+		},
+	}
+	go func() {
+		// Copy blob into the gzip writer, which hashes and counts the size
+		// of the compressed output, and into the hasher of the raw contents.
+		_, copyErr := io.Copy(io.MultiWriter(h, zw), l.blob)
+
+		// Close the gzip writer once copying is done. If this is done in the
+		// Close method of compressedReader instead, then it can cause a panic
+		// when the compressedReader is closed before the blob is fully
+		// consumed and io.Copy in this goroutine is still blocking.
+		closeErr := zw.Close()
+
+		// Check errors from writing and closing streams.
+		if copyErr != nil {
+			close(doneDigesting)
+			pw.CloseWithError(copyErr)
+			return
+		}
+		if closeErr != nil {
+			close(doneDigesting)
+			pw.CloseWithError(closeErr)
+			return
+		}
+
+		// Flush the buffer once all writes are complete to the gzip writer.
+		if err := bw.Flush(); err != nil {
+			close(doneDigesting)
+			pw.CloseWithError(err)
+			return
+		}
+
+		// Notify closer that digests are done being written.
+		close(doneDigesting)
+
+		// Close the compressed reader to calculate digest/diffID/size. This
+		// will cause pr to return EOF which will cause readers of the
+		// Compressed stream to finish reading.
+		pw.CloseWithError(cr.Close())
+	}()
+
+	return cr, nil
+}
+
+func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) }
+
+func (cr *compressedReader) Close() error { return cr.closer() }
+
+// countWriter counts bytes written to it.
+type countWriter struct{ n int64 }
+
+func (c *countWriter) Write(p []byte) (int, error) {
+	c.n += int64(len(p))
+	return len(p), nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md
new file mode 100644
index 00000000..03f339b0
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md
@@ -0,0 +1,280 @@
+# `tarball`
+
+[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball)
+
+This package produces tarballs that can be consumed via `docker load`. Note
+that this is a _different_ format from the [`legacy`](/pkg/legacy/tarball)
+tarballs that are produced by `docker save`, but this package is still able to
+read the legacy tarballs produced by `docker save`.
+
+## Usage
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/google/go-containerregistry/pkg/v1/tarball"
+)
+
+func main() {
+	// Read a tarball from os.Args[1] that contains ubuntu.
+	tag, err := name.NewTag("ubuntu")
+	if err != nil {
+		panic(err)
+	}
+	img, err := tarball.ImageFromPath(os.Args[1], &tag)
+	if err != nil {
+		panic(err)
+	}
+
+	// Write that tarball to os.Args[2] with a different tag.
+	newTag, err := name.NewTag("ubuntu:newest")
+	if err != nil {
+		panic(err)
+	}
+	f, err := os.Create(os.Args[2])
+	if err != nil {
+		panic(err)
+	}
+	defer f.Close()
+
+	if err := tarball.Write(newTag, img, f); err != nil {
+		panic(err)
+	}
+}
+```
+
+## Structure
+
+ +Let's look at what happens when we write out a tarball: + + +### `ubuntu:latest` + +``` +$ crane pull ubuntu ubuntu.tar && mkdir ubuntu && tar xf ubuntu.tar -C ubuntu && rm ubuntu.tar +$ tree ubuntu/ +ubuntu/ +├── 423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz +├── b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz +├── de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz +├── f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz +├── manifest.json +└── sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c + +0 directories, 6 files +``` + +There are a couple interesting files here. + +`manifest.json` is the entrypoint: a list of [`tarball.Descriptor`s](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/tarball#Descriptor) +that describe the images contained in this tarball. + +For each image, this has the `RepoTags` (how it was pulled), a `Config` file +that points to the image's config file, a list of `Layers`, and (optionally) +`LayerSources`. + +``` +$ jq < ubuntu/manifest.json +[ + { + "Config": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c", + "RepoTags": [ + "ubuntu" + ], + "Layers": [ + "423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954.tar.gz", + "de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d.tar.gz", + "f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b.tar.gz", + "b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7.tar.gz" + ] + } +] +``` + +The config file and layers are exactly what you would expect, and match the +registry representations of the same artifacts. You'll notice that the +`manifest.json` contains similar information as the registry manifest, but isn't +quite the same: + +``` +$ crane manifest ubuntu@sha256:0925d086715714114c1988f7c947db94064fd385e171a63c07730f1fa014e6f9 +{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 3408, + "digest": "sha256:72300a873c2ca11c70d0c8642177ce76ff69ae04d61a5813ef58d40ff66e3e7c" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 26692096, + "digest": "sha256:423ae2b273f4c17ceee9e8482fa8d071d90c7d052ae208e1fe4963fceb3d6954" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 35365, + "digest": "sha256:de83a2304fa1f7c4a13708a0d15b9704f5945c2be5cbb2b3ed9b2ccb718d0b3d" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 852, + "digest": "sha256:f9a83bce3af0648efaa60b9bb28225b09136d2d35d0bed25ac764297076dec1b" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 163, + "digest": "sha256:b6b53be908de2c0c78070fff0a9f04835211b3156c4e73785747af365e71a0d7" + } + ] +} +``` + +This makes it difficult to maintain image digests when roundtripping images +through the tarball format, so it's not a great format if you care about +provenance. + +The ubuntu example didn't have any `LayerSources` -- let's look at another image +that does. 
+
+### `hello-world:nanoserver`
+
+```
+$ crane pull hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b nanoserver.tar
+$ mkdir nanoserver && tar xf nanoserver.tar -C nanoserver && rm nanoserver.tar
+$ tree nanoserver/
+nanoserver/
+├── 10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz
+├── a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz
+├── be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz
+├── manifest.json
+└── sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6
+
+0 directories, 5 files
+
+$ jq < nanoserver/manifest.json
+[
+  {
+    "Config": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6",
+    "RepoTags": [
+      "index.docker.io/library/hello-world:i-was-a-digest"
+    ],
+    "Layers": [
+      "a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053.tar.gz",
+      "be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a.tar.gz",
+      "10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0.tar.gz"
+    ],
+    "LayerSources": {
+      "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": {
+        "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip",
+        "size": 101145811,
+        "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053",
+        "urls": [
+          "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053"
+        ]
+      }
+    }
+  }
+]
+```
+
+A couple of things to note about this `manifest.json` versus the other:
+* The `RepoTags` field is a bit weird here. `hello-world` is a multi-platform
+  image, so we had to pull this image by digest, since we're (I'm) on
+  amd64/linux and wanted to grab a Windows image. Since the tarball format
+  expects a tag under `RepoTags`, and we didn't pull by tag, we replace the
+  digest with a sentinel `i-was-a-digest` "tag" to appease docker.
+* The `LayerSources` has enough information to reconstruct the foreign layers
+  pointer when pushing/pulling from the registry. For legal reasons, Microsoft
+  doesn't want anyone but them to serve Windows base images, so the mediaType
+  here indicates a "foreign" or "non-distributable" layer with a URL for where
+  you can download it from Microsoft (see the [OCI
+  image-spec](https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers)).
+
+We can look at what's in the registry to explain both of these things:
+```
+$ crane manifest hello-world:nanoserver | jq .
+{
+  "manifests": [
+    {
+      "digest": "sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b",
+      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
+      "platform": {
+        "architecture": "amd64",
+        "os": "windows",
+        "os.version": "10.0.17763.1040"
+      },
+      "size": 1124
+    }
+  ],
+  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+  "schemaVersion": 2
+}
+
+
+# Note the media type and "urls" field.
+$ crane manifest hello-world:nanoserver@sha256:63c287625c2b0b72900e562de73c0e381472a83b1b39217aef3856cd398eca0b | jq .
+{ + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 1721, + "digest": "sha256:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6" + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", + "size": 101145811, + "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", + "urls": [ + "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" + ] + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 1669, + "digest": "sha256:be21f08f670160cbae227e3053205b91d6bfa3de750b90c7e00bd2c511ccb63a" + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 949, + "digest": "sha256:10d1439be4eb8819987ec2e9c140d44d74d6b42a823d57fe1953bd99948e1bc0" + } + ] +} +``` + +The `LayerSources` map is keyed by the diffid. Note that `sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e` matches the first layer in the config file: +``` +$ jq '.[0].LayerSources' < nanoserver/manifest.json +{ + "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e": { + "mediaType": "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip", + "size": 101145811, + "digest": "sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053", + "urls": [ + "https://mcr.microsoft.com/v2/windows/nanoserver/blobs/sha256:a35da61c356213336e646756218539950461ff2bf096badf307a23add6e70053" + ] + } +} + +$ jq < nanoserver/sha256\:bc5d255ea81f83c8c38a982a6d29a6f2198427d258aea5f166e49856896b2da6 | jq .rootfs +{ + "type": "layers", + "diff_ids": [ + "sha256:26fd2d9d4c64a4f965bbc77939a454a31b607470f430b5d69fc21ded301fa55e", + "sha256:601cf7d78c62e4b4d32a7bbf96a17606a9cea5bd9d22ffa6f34aa431d056b0e8", + "sha256:a1e1a3bf6529adcce4d91dce2cad86c2604a66b507ccbc4d2239f3da0ec5aab9" + ] +} +``` diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go new file mode 100644 index 00000000..4eb79bb4 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tarball provides facilities for reading/writing v1.Images from/to +// a tarball on-disk. +package tarball diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go new file mode 100644 index 00000000..1f977e16 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go @@ -0,0 +1,429 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tarball + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "sync" + + comp "github.com/google/go-containerregistry/internal/compression" + "github.com/google/go-containerregistry/pkg/compression" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type image struct { + opener Opener + manifest *Manifest + config []byte + imgDescriptor *Descriptor + + tag *name.Tag +} + +type uncompressedImage struct { + *image +} + +type compressedImage struct { + *image + manifestLock sync.Mutex // Protects manifest + manifest *v1.Manifest +} + +var _ partial.UncompressedImageCore = (*uncompressedImage)(nil) +var _ partial.CompressedImageCore = (*compressedImage)(nil) + +// Opener is a thunk for opening a tar file. +type Opener func() (io.ReadCloser, error) + +func pathOpener(path string) Opener { + return func() (io.ReadCloser, error) { + return os.Open(path) + } +} + +// ImageFromPath returns a v1.Image from a tarball located on path. +func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) { + return Image(pathOpener(path), tag) +} + +// LoadManifest load manifest +func LoadManifest(opener Opener) (Manifest, error) { + m, err := extractFileFromTar(opener, "manifest.json") + if err != nil { + return nil, err + } + defer m.Close() + + var manifest Manifest + + if err := json.NewDecoder(m).Decode(&manifest); err != nil { + return nil, err + } + return manifest, nil +} + +// Image exposes an image from the tarball at the provided path. +func Image(opener Opener, tag *name.Tag) (v1.Image, error) { + img := &image{ + opener: opener, + tag: tag, + } + if err := img.loadTarDescriptorAndConfig(); err != nil { + return nil, err + } + + // Peek at the first layer and see if it's compressed. + if len(img.imgDescriptor.Layers) > 0 { + compressed, err := img.areLayersCompressed() + if err != nil { + return nil, err + } + if compressed { + c := compressedImage{ + image: img, + } + return partial.CompressedToImage(&c) + } + } + + uc := uncompressedImage{ + image: img, + } + return partial.UncompressedToImage(&uc) +} + +func (i *image) MediaType() (types.MediaType, error) { + return types.DockerManifestSchema2, nil +} + +// Descriptor stores the manifest data for a single image inside a `docker save` tarball. +type Descriptor struct { + Config string + RepoTags []string + Layers []string + + // Tracks foreign layer info. Key is DiffID. + LayerSources map[v1.Hash]v1.Descriptor `json:",omitempty"` +} + +// Manifest represents the manifests of all images as the `manifest.json` file in a `docker save` tarball. 
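+//
+// As a sketch, a single-image manifest.json decodes into something like:
+//
+//	[{
+//	    "Config":   "sha256:<config digest>",
+//	    "RepoTags": ["ubuntu"],
+//	    "Layers":   ["<layer digest>.tar.gz", ...]
+//	}]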
+type Manifest []Descriptor + +func (m Manifest) findDescriptor(tag *name.Tag) (*Descriptor, error) { + if tag == nil { + if len(m) != 1 { + return nil, errors.New("tarball must contain only a single image to be used with tarball.Image") + } + return &(m)[0], nil + } + for _, img := range m { + for _, tagStr := range img.RepoTags { + repoTag, err := name.NewTag(tagStr) + if err != nil { + return nil, err + } + + // Compare the resolved names, since there are several ways to specify the same tag. + if repoTag.Name() == tag.Name() { + return &img, nil + } + } + } + return nil, fmt.Errorf("tag %s not found in tarball", tag) +} + +func (i *image) areLayersCompressed() (bool, error) { + if len(i.imgDescriptor.Layers) == 0 { + return false, errors.New("0 layers found in image") + } + layer := i.imgDescriptor.Layers[0] + blob, err := extractFileFromTar(i.opener, layer) + if err != nil { + return false, err + } + defer blob.Close() + + cp, _, err := comp.PeekCompression(blob) + if err != nil { + return false, err + } + + return cp != compression.None, nil +} + +func (i *image) loadTarDescriptorAndConfig() error { + m, err := extractFileFromTar(i.opener, "manifest.json") + if err != nil { + return err + } + defer m.Close() + + if err := json.NewDecoder(m).Decode(&i.manifest); err != nil { + return err + } + + if i.manifest == nil { + return errors.New("no valid manifest.json in tarball") + } + + i.imgDescriptor, err = i.manifest.findDescriptor(i.tag) + if err != nil { + return err + } + + cfg, err := extractFileFromTar(i.opener, i.imgDescriptor.Config) + if err != nil { + return err + } + defer cfg.Close() + + i.config, err = io.ReadAll(cfg) + if err != nil { + return err + } + return nil +} + +func (i *image) RawConfigFile() ([]byte, error) { + return i.config, nil +} + +// tarFile represents a single file inside a tar. Closing it closes the tar itself. +type tarFile struct { + io.Reader + io.Closer +} + +func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { + f, err := opener() + if err != nil { + return nil, err + } + close := true + defer func() { + if close { + f.Close() + } + }() + + tf := tar.NewReader(f) + for { + hdr, err := tf.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, err + } + if hdr.Name == filePath { + if hdr.Typeflag == tar.TypeSymlink || hdr.Typeflag == tar.TypeLink { + currentDir := filepath.Dir(filePath) + return extractFileFromTar(opener, path.Join(currentDir, path.Clean(hdr.Linkname))) + } + close = false + return tarFile{ + Reader: tf, + Closer: f, + }, nil + } + } + return nil, fmt.Errorf("file %s not found in tar", filePath) +} + +// uncompressedLayerFromTarball implements partial.UncompressedLayer +type uncompressedLayerFromTarball struct { + diffID v1.Hash + mediaType types.MediaType + opener Opener + filePath string +} + +// foreignUncompressedLayer implements partial.UncompressedLayer but returns +// a custom descriptor. This allows the foreign layer URLs to be included in +// the generated image manifest for uncompressed layers. 
+type foreignUncompressedLayer struct { + uncompressedLayerFromTarball + desc v1.Descriptor +} + +func (fl *foreignUncompressedLayer) Descriptor() (*v1.Descriptor, error) { + return &fl.desc, nil +} + +// DiffID implements partial.UncompressedLayer +func (ulft *uncompressedLayerFromTarball) DiffID() (v1.Hash, error) { + return ulft.diffID, nil +} + +// Uncompressed implements partial.UncompressedLayer +func (ulft *uncompressedLayerFromTarball) Uncompressed() (io.ReadCloser, error) { + return extractFileFromTar(ulft.opener, ulft.filePath) +} + +func (ulft *uncompressedLayerFromTarball) MediaType() (types.MediaType, error) { + return ulft.mediaType, nil +} + +func (i *uncompressedImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { + cfg, err := partial.ConfigFile(i) + if err != nil { + return nil, err + } + for idx, diffID := range cfg.RootFS.DiffIDs { + if diffID == h { + // Technically the media type should be 'application/tar' but given that our + // v1.Layer doesn't force consumers to care about whether the layer is compressed + // we should be fine returning the DockerLayer media type + mt := types.DockerLayer + if bd, ok := i.imgDescriptor.LayerSources[h]; ok { + // Overwrite the mediaType for foreign layers. + return &foreignUncompressedLayer{ + uncompressedLayerFromTarball: uncompressedLayerFromTarball{ + diffID: diffID, + mediaType: bd.MediaType, + opener: i.opener, + filePath: i.imgDescriptor.Layers[idx], + }, + desc: bd, + }, nil + } + return &uncompressedLayerFromTarball{ + diffID: diffID, + mediaType: mt, + opener: i.opener, + filePath: i.imgDescriptor.Layers[idx], + }, nil + } + } + return nil, fmt.Errorf("diff id %q not found", h) +} + +func (c *compressedImage) Manifest() (*v1.Manifest, error) { + c.manifestLock.Lock() + defer c.manifestLock.Unlock() + if c.manifest != nil { + return c.manifest, nil + } + + b, err := c.RawConfigFile() + if err != nil { + return nil, err + } + + cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + c.manifest = &v1.Manifest{ + SchemaVersion: 2, + MediaType: types.DockerManifestSchema2, + Config: v1.Descriptor{ + MediaType: types.DockerConfigJSON, + Size: cfgSize, + Digest: cfgHash, + }, + } + + for i, p := range c.imgDescriptor.Layers { + cfg, err := partial.ConfigFile(c) + if err != nil { + return nil, err + } + diffid := cfg.RootFS.DiffIDs[i] + if d, ok := c.imgDescriptor.LayerSources[diffid]; ok { + // If it's a foreign layer, just append the descriptor so we can avoid + // reading the entire file. 
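+			// The descriptor from LayerSources already carries the media
+			// type, size, digest, and URLs of the foreign layer, so no blob
+			// access is needed here.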
+ c.manifest.Layers = append(c.manifest.Layers, d) + } else { + l, err := extractFileFromTar(c.opener, p) + if err != nil { + return nil, err + } + defer l.Close() + sha, size, err := v1.SHA256(l) + if err != nil { + return nil, err + } + c.manifest.Layers = append(c.manifest.Layers, v1.Descriptor{ + MediaType: types.DockerLayer, + Size: size, + Digest: sha, + }) + } + } + return c.manifest, nil +} + +func (c *compressedImage) RawManifest() ([]byte, error) { + return partial.RawManifest(c) +} + +// compressedLayerFromTarball implements partial.CompressedLayer +type compressedLayerFromTarball struct { + desc v1.Descriptor + opener Opener + filePath string +} + +// Digest implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Digest() (v1.Hash, error) { + return clft.desc.Digest, nil +} + +// Compressed implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Compressed() (io.ReadCloser, error) { + return extractFileFromTar(clft.opener, clft.filePath) +} + +// MediaType implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) MediaType() (types.MediaType, error) { + return clft.desc.MediaType, nil +} + +// Size implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Size() (int64, error) { + return clft.desc.Size, nil +} + +func (c *compressedImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + m, err := c.Manifest() + if err != nil { + return nil, err + } + for i, l := range m.Layers { + if l.Digest == h { + fp := c.imgDescriptor.Layers[i] + return &compressedLayerFromTarball{ + desc: l, + opener: c.opener, + filePath: fp, + }, nil + } + } + return nil, fmt.Errorf("blob %v not found", h) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go new file mode 100644 index 00000000..a344e920 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go @@ -0,0 +1,349 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tarball + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "os" + "sync" + + "github.com/containerd/stargz-snapshotter/estargz" + "github.com/google/go-containerregistry/internal/and" + comp "github.com/google/go-containerregistry/internal/compression" + gestargz "github.com/google/go-containerregistry/internal/estargz" + ggzip "github.com/google/go-containerregistry/internal/gzip" + "github.com/google/go-containerregistry/internal/zstd" + "github.com/google/go-containerregistry/pkg/compression" + "github.com/google/go-containerregistry/pkg/logs" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +type layer struct { + digest v1.Hash + diffID v1.Hash + size int64 + compressedopener Opener + uncompressedopener Opener + compression compression.Compression + compressionLevel int + annotations map[string]string + estgzopts []estargz.Option + mediaType types.MediaType +} + +// Descriptor implements partial.withDescriptor. +func (l *layer) Descriptor() (*v1.Descriptor, error) { + digest, err := l.Digest() + if err != nil { + return nil, err + } + return &v1.Descriptor{ + Size: l.size, + Digest: digest, + Annotations: l.annotations, + MediaType: l.mediaType, + }, nil +} + +// Digest implements v1.Layer +func (l *layer) Digest() (v1.Hash, error) { + return l.digest, nil +} + +// DiffID implements v1.Layer +func (l *layer) DiffID() (v1.Hash, error) { + return l.diffID, nil +} + +// Compressed implements v1.Layer +func (l *layer) Compressed() (io.ReadCloser, error) { + return l.compressedopener() +} + +// Uncompressed implements v1.Layer +func (l *layer) Uncompressed() (io.ReadCloser, error) { + return l.uncompressedopener() +} + +// Size implements v1.Layer +func (l *layer) Size() (int64, error) { + return l.size, nil +} + +// MediaType implements v1.Layer +func (l *layer) MediaType() (types.MediaType, error) { + return l.mediaType, nil +} + +// LayerOption applies options to layer +type LayerOption func(*layer) + +// WithCompression is a functional option for overriding the default +// compression algorithm used for compressing uncompressed tarballs. +// Please note that WithCompression(compression.ZStd) should be used +// in conjunction with WithMediaType(types.OCILayerZStd) +func WithCompression(comp compression.Compression) LayerOption { + return func(l *layer) { + switch comp { + case compression.ZStd: + l.compression = compression.ZStd + case compression.GZip: + l.compression = compression.GZip + case compression.None: + logs.Warn.Printf("Compression type 'none' is not supported for tarball layers; using gzip compression.") + l.compression = compression.GZip + default: + logs.Warn.Printf("Unexpected compression type for WithCompression(): %s; using gzip compression instead.", comp) + l.compression = compression.GZip + } + } +} + +// WithCompressionLevel is a functional option for overriding the default +// compression level used for compressing uncompressed tarballs. +func WithCompressionLevel(level int) LayerOption { + return func(l *layer) { + l.compressionLevel = level + } +} + +// WithMediaType is a functional option for overriding the layer's media type. +func WithMediaType(mt types.MediaType) LayerOption { + return func(l *layer) { + l.mediaType = mt + } +} + +// WithCompressedCaching is a functional option that overrides the +// logic for accessing the compressed bytes to memoize the result +// and avoid expensive repeated gzips. 
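+//
+// A sketch of typical use (the path is hypothetical):
+//
+//	l, err := tarball.LayerFromFile("layer.tar", tarball.WithCompressedCaching)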
+func WithCompressedCaching(l *layer) { + var once sync.Once + var err error + + buf := bytes.NewBuffer(nil) + og := l.compressedopener + + l.compressedopener = func() (io.ReadCloser, error) { + once.Do(func() { + var rc io.ReadCloser + rc, err = og() + if err == nil { + defer rc.Close() + _, err = io.Copy(buf, rc) + } + }) + if err != nil { + return nil, err + } + + return io.NopCloser(bytes.NewBuffer(buf.Bytes())), nil + } +} + +// WithEstargzOptions is a functional option that allow the caller to pass +// through estargz.Options to the underlying compression layer. This is +// only meaningful when estargz is enabled. +func WithEstargzOptions(opts ...estargz.Option) LayerOption { + return func(l *layer) { + l.estgzopts = opts + } +} + +// WithEstargz is a functional option that explicitly enables estargz support. +func WithEstargz(l *layer) { + oguncompressed := l.uncompressedopener + estargz := func() (io.ReadCloser, error) { + crc, err := oguncompressed() + if err != nil { + return nil, err + } + eopts := append(l.estgzopts, estargz.WithCompressionLevel(l.compressionLevel)) + rc, h, err := gestargz.ReadCloser(crc, eopts...) + if err != nil { + return nil, err + } + l.annotations[estargz.TOCJSONDigestAnnotation] = h.String() + return &and.ReadCloser{ + Reader: rc, + CloseFunc: func() error { + err := rc.Close() + if err != nil { + return err + } + // As an optimization, leverage the DiffID exposed by the estargz ReadCloser + l.diffID, err = v1.NewHash(rc.DiffID().String()) + return err + }, + }, nil + } + uncompressed := func() (io.ReadCloser, error) { + urc, err := estargz() + if err != nil { + return nil, err + } + return ggzip.UnzipReadCloser(urc) + } + + l.compressedopener = estargz + l.uncompressedopener = uncompressed +} + +// LayerFromFile returns a v1.Layer given a tarball +func LayerFromFile(path string, opts ...LayerOption) (v1.Layer, error) { + opener := func() (io.ReadCloser, error) { + return os.Open(path) + } + return LayerFromOpener(opener, opts...) +} + +// LayerFromOpener returns a v1.Layer given an Opener function. +// The Opener may return either an uncompressed tarball (common), +// or a compressed tarball (uncommon). +// +// When using this in conjunction with something like remote.Write +// the uncompressed path may end up gzipping things multiple times: +// 1. Compute the layer SHA256 +// 2. Upload the compressed layer. +// +// Since gzip can be expensive, we support an option to memoize the +// compression that can be passed here: tarball.WithCompressedCaching +func LayerFromOpener(opener Opener, opts ...LayerOption) (v1.Layer, error) { + comp, err := comp.GetCompression(opener) + if err != nil { + return nil, err + } + + layer := &layer{ + compression: compression.GZip, + compressionLevel: gzip.BestSpeed, + annotations: make(map[string]string, 1), + mediaType: types.DockerLayer, + } + + if estgz := os.Getenv("GGCR_EXPERIMENT_ESTARGZ"); estgz == "1" { + opts = append([]LayerOption{WithEstargz}, opts...) 
+ } + + switch comp { + case compression.GZip: + layer.compressedopener = opener + layer.uncompressedopener = func() (io.ReadCloser, error) { + urc, err := opener() + if err != nil { + return nil, err + } + return ggzip.UnzipReadCloser(urc) + } + case compression.ZStd: + layer.compressedopener = opener + layer.uncompressedopener = func() (io.ReadCloser, error) { + urc, err := opener() + if err != nil { + return nil, err + } + return zstd.UnzipReadCloser(urc) + } + default: + layer.uncompressedopener = opener + layer.compressedopener = func() (io.ReadCloser, error) { + crc, err := opener() + if err != nil { + return nil, err + } + + if layer.compression == compression.ZStd { + return zstd.ReadCloserLevel(crc, layer.compressionLevel), nil + } + + return ggzip.ReadCloserLevel(crc, layer.compressionLevel), nil + } + } + + for _, opt := range opts { + opt(layer) + } + + // Warn if media type does not match compression + var mediaTypeMismatch = false + switch layer.compression { + case compression.GZip: + mediaTypeMismatch = + layer.mediaType != types.OCILayer && + layer.mediaType != types.OCIRestrictedLayer && + layer.mediaType != types.DockerLayer + + case compression.ZStd: + mediaTypeMismatch = layer.mediaType != types.OCILayerZStd + } + + if mediaTypeMismatch { + logs.Warn.Printf("Unexpected mediaType (%s) for selected compression in %s in LayerFromOpener().", layer.mediaType, layer.compression) + } + + if layer.digest, layer.size, err = computeDigest(layer.compressedopener); err != nil { + return nil, err + } + + empty := v1.Hash{} + if layer.diffID == empty { + if layer.diffID, err = computeDiffID(layer.uncompressedopener); err != nil { + return nil, err + } + } + + return layer, nil +} + +// LayerFromReader returns a v1.Layer given a io.Reader. +// +// The reader's contents are read and buffered to a temp file in the process. +// +// Deprecated: Use LayerFromOpener or stream.NewLayer instead, if possible. +func LayerFromReader(reader io.Reader, opts ...LayerOption) (v1.Layer, error) { + tmp, err := os.CreateTemp("", "") + if err != nil { + return nil, fmt.Errorf("creating temp file to buffer reader: %w", err) + } + if _, err := io.Copy(tmp, reader); err != nil { + return nil, fmt.Errorf("writing temp file to buffer reader: %w", err) + } + return LayerFromFile(tmp.Name(), opts...) +} + +func computeDigest(opener Opener) (v1.Hash, int64, error) { + rc, err := opener() + if err != nil { + return v1.Hash{}, 0, err + } + defer rc.Close() + + return v1.SHA256(rc) +} + +func computeDiffID(opener Opener) (v1.Hash, error) { + rc, err := opener() + if err != nil { + return v1.Hash{}, err + } + defer rc.Close() + + digest, _, err := v1.SHA256(rc) + return digest, err +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go new file mode 100644 index 00000000..7d7a0b3b --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go @@ -0,0 +1,459 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tarball
+
+import (
+	"archive/tar"
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+
+	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/partial"
+)
+
+// WriteToFile writes in the compressed format to a tarball, on disk.
+// This is just syntactic sugar wrapping tarball.Write with a new file.
+func WriteToFile(p string, ref name.Reference, img v1.Image, opts ...WriteOption) error {
+	w, err := os.Create(p)
+	if err != nil {
+		return err
+	}
+	defer w.Close()
+
+	return Write(ref, img, w, opts...)
+}
+
+// MultiWriteToFile writes in the compressed format to a tarball, on disk.
+// This is just syntactic sugar wrapping tarball.MultiWrite with a new file.
+func MultiWriteToFile(p string, tagToImage map[name.Tag]v1.Image, opts ...WriteOption) error {
+	refToImage := make(map[name.Reference]v1.Image, len(tagToImage))
+	for i, d := range tagToImage {
+		refToImage[i] = d
+	}
+	return MultiRefWriteToFile(p, refToImage, opts...)
+}
+
+// MultiRefWriteToFile writes in the compressed format to a tarball, on disk.
+// This is just syntactic sugar wrapping tarball.MultiRefWrite with a new file.
+func MultiRefWriteToFile(p string, refToImage map[name.Reference]v1.Image, opts ...WriteOption) error {
+	w, err := os.Create(p)
+	if err != nil {
+		return err
+	}
+	defer w.Close()
+
+	return MultiRefWrite(refToImage, w, opts...)
+}
+
+// Write is a wrapper to write a single image and tag to a tarball.
+func Write(ref name.Reference, img v1.Image, w io.Writer, opts ...WriteOption) error {
+	return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w, opts...)
+}
+
+// MultiWrite writes the contents of each image to the provided writer, in the compressed format.
+// The contents are written in the following format:
+// One manifest.json file at the top level containing information about several images.
+// One file for each layer, named after the layer's SHA.
+// One file for the config blob, named after its SHA.
+func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer, opts ...WriteOption) error {
+	refToImage := make(map[name.Reference]v1.Image, len(tagToImage))
+	for i, d := range tagToImage {
+		refToImage[i] = d
+	}
+	return MultiRefWrite(refToImage, w, opts...)
+}
+
+// MultiRefWrite writes the contents of each image to the provided writer, in the compressed format.
+// The contents are written in the following format:
+// One manifest.json file at the top level containing information about several images.
+// One file for each layer, named after the layer's SHA.
+// One file for the config blob, named after its SHA.
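+//
+// A sketch of typical use (references and writer are hypothetical):
+//
+//	refs := map[name.Reference]v1.Image{
+//		name.MustParseReference("example.com/app:v1"): img,
+//	}
+//	err := tarball.MultiRefWrite(refs, f) // f is an io.Writer, e.g. an *os.File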
+func MultiRefWrite(refToImage map[name.Reference]v1.Image, w io.Writer, opts ...WriteOption) error { + // process options + o := &writeOptions{ + updates: nil, + } + for _, option := range opts { + if err := option(o); err != nil { + return err + } + } + + size, mBytes, err := getSizeAndManifest(refToImage) + if err != nil { + return sendUpdateReturn(o, err) + } + + return writeImagesToTar(refToImage, mBytes, size, w, o) +} + +// sendUpdateReturn return the passed in error message, also sending on update channel, if it exists +func sendUpdateReturn(o *writeOptions, err error) error { + if o != nil && o.updates != nil { + o.updates <- v1.Update{ + Error: err, + } + } + return err +} + +// sendProgressWriterReturn return the passed in error message, also sending on update channel, if it exists, along with downloaded information +func sendProgressWriterReturn(pw *progressWriter, err error) error { + if pw != nil { + return pw.Error(err) + } + return err +} + +// writeImagesToTar writes the images to the tarball +func writeImagesToTar(refToImage map[name.Reference]v1.Image, m []byte, size int64, w io.Writer, o *writeOptions) (err error) { + if w == nil { + return sendUpdateReturn(o, errors.New("must pass valid writer")) + } + imageToTags := dedupRefToImage(refToImage) + + tw := w + var pw *progressWriter + + // we only calculate the sizes and use a progressWriter if we were provided + // an option with a progress channel + if o != nil && o.updates != nil { + pw = &progressWriter{ + w: w, + updates: o.updates, + size: size, + } + tw = pw + } + + tf := tar.NewWriter(tw) + defer tf.Close() + + seenLayerDigests := make(map[string]struct{}) + + for img := range imageToTags { + // Write the config. + cfgName, err := img.ConfigName() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + cfgBlob, err := img.RawConfigFile() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + if err := writeTarEntry(tf, cfgName.String(), bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil { + return sendProgressWriterReturn(pw, err) + } + + // Write the layers. + layers, err := img.Layers() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + layerFiles := make([]string, len(layers)) + for i, l := range layers { + d, err := l.Digest() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + // Munge the file name to appease ancient technology. + // + // tar assumes anything with a colon is a remote tape drive: + // https://www.gnu.org/software/tar/manual/html_section/tar_45.html + // Drop the algorithm prefix, e.g. 
"sha256:" + hex := d.Hex + + // gunzip expects certain file extensions: + // https://www.gnu.org/software/gzip/manual/html_node/Overview.html + layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) + + if _, ok := seenLayerDigests[hex]; ok { + continue + } + seenLayerDigests[hex] = struct{}{} + + r, err := l.Compressed() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + blobSize, err := l.Size() + if err != nil { + return sendProgressWriterReturn(pw, err) + } + + if err := writeTarEntry(tf, layerFiles[i], r, blobSize); err != nil { + return sendProgressWriterReturn(pw, err) + } + } + } + if err := writeTarEntry(tf, "manifest.json", bytes.NewReader(m), int64(len(m))); err != nil { + return sendProgressWriterReturn(pw, err) + } + + // be sure to close the tar writer so everything is flushed out before we send our EOF + if err := tf.Close(); err != nil { + return sendProgressWriterReturn(pw, err) + } + // send an EOF to indicate finished on the channel, but nil as our return error + _ = sendProgressWriterReturn(pw, io.EOF) + return nil +} + +// calculateManifest calculates the manifest and optionally the size of the tar file +func calculateManifest(refToImage map[name.Reference]v1.Image) (m Manifest, err error) { + imageToTags := dedupRefToImage(refToImage) + + if len(imageToTags) == 0 { + return nil, errors.New("set of images is empty") + } + + for img, tags := range imageToTags { + cfgName, err := img.ConfigName() + if err != nil { + return nil, err + } + + // Store foreign layer info. + layerSources := make(map[v1.Hash]v1.Descriptor) + + // Write the layers. + layers, err := img.Layers() + if err != nil { + return nil, err + } + layerFiles := make([]string, len(layers)) + for i, l := range layers { + d, err := l.Digest() + if err != nil { + return nil, err + } + // Munge the file name to appease ancient technology. + // + // tar assumes anything with a colon is a remote tape drive: + // https://www.gnu.org/software/tar/manual/html_section/tar_45.html + // Drop the algorithm prefix, e.g. "sha256:" + hex := d.Hex + + // gunzip expects certain file extensions: + // https://www.gnu.org/software/gzip/manual/html_node/Overview.html + layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) + + // Add to LayerSources if it's a foreign layer. + desc, err := partial.BlobDescriptor(img, d) + if err != nil { + return nil, err + } + if !desc.MediaType.IsDistributable() { + diffid, err := partial.BlobToDiffID(img, d) + if err != nil { + return nil, err + } + layerSources[diffid] = *desc + } + } + + // Generate the tar descriptor and write it. + m = append(m, Descriptor{ + Config: cfgName.String(), + RepoTags: tags, + Layers: layerFiles, + LayerSources: layerSources, + }) + } + // sort by name of the repotags so it is consistent. 
Alternatively, we could sort by hash of the + // descriptor, but that would make it hard for humans to process + sort.Slice(m, func(i, j int) bool { + return strings.Join(m[i].RepoTags, ",") < strings.Join(m[j].RepoTags, ",") + }) + + return m, nil +} + +// CalculateSize calculates the expected complete size of the output tar file +func CalculateSize(refToImage map[name.Reference]v1.Image) (size int64, err error) { + size, _, err = getSizeAndManifest(refToImage) + return size, err +} + +func getSizeAndManifest(refToImage map[name.Reference]v1.Image) (int64, []byte, error) { + m, err := calculateManifest(refToImage) + if err != nil { + return 0, nil, fmt.Errorf("unable to calculate manifest: %w", err) + } + mBytes, err := json.Marshal(m) + if err != nil { + return 0, nil, fmt.Errorf("could not marshall manifest to bytes: %w", err) + } + + size, err := calculateTarballSize(refToImage, mBytes) + if err != nil { + return 0, nil, fmt.Errorf("error calculating tarball size: %w", err) + } + return size, mBytes, nil +} + +// calculateTarballSize calculates the size of the tar file +func calculateTarballSize(refToImage map[name.Reference]v1.Image, mBytes []byte) (size int64, err error) { + imageToTags := dedupRefToImage(refToImage) + + seenLayerDigests := make(map[string]struct{}) + for img, name := range imageToTags { + manifest, err := img.Manifest() + if err != nil { + return size, fmt.Errorf("unable to get manifest for img %s: %w", name, err) + } + size += calculateSingleFileInTarSize(manifest.Config.Size) + for _, l := range manifest.Layers { + hex := l.Digest.Hex + if _, ok := seenLayerDigests[hex]; ok { + continue + } + seenLayerDigests[hex] = struct{}{} + size += calculateSingleFileInTarSize(l.Size) + } + } + // add the manifest + size += calculateSingleFileInTarSize(int64(len(mBytes))) + + // add the two padding blocks that indicate end of a tar file + size += 1024 + return size, nil +} + +func dedupRefToImage(refToImage map[name.Reference]v1.Image) map[v1.Image][]string { + imageToTags := make(map[v1.Image][]string) + + for ref, img := range refToImage { + if tag, ok := ref.(name.Tag); ok { + if tags, ok := imageToTags[img]; !ok || tags == nil { + imageToTags[img] = []string{} + } + // Docker cannot load tarballs without an explicit tag: + // https://github.com/google/go-containerregistry/issues/890 + // + // We can't use the fully qualified tag.Name() because of rules_docker: + // https://github.com/google/go-containerregistry/issues/527 + // + // If the tag is "latest", but tag.String() doesn't end in ":latest", + // just append it. Kind of gross, but should work for now. 
+ ts := tag.String() + if tag.Identifier() == name.DefaultTag && !strings.HasSuffix(ts, ":"+name.DefaultTag) { + ts = fmt.Sprintf("%s:%s", ts, name.DefaultTag) + } + imageToTags[img] = append(imageToTags[img], ts) + } else if _, ok := imageToTags[img]; !ok { + imageToTags[img] = nil + } + } + + return imageToTags +} + +// writeTarEntry writes a file to the provided writer with a corresponding tar header +func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { + hdr := &tar.Header{ + Mode: 0644, + Typeflag: tar.TypeReg, + Size: size, + Name: path, + } + if err := tf.WriteHeader(hdr); err != nil { + return err + } + _, err := io.Copy(tf, r) + return err +} + +// ComputeManifest get the manifest.json that will be written to the tarball +// for multiple references +func ComputeManifest(refToImage map[name.Reference]v1.Image) (Manifest, error) { + return calculateManifest(refToImage) +} + +// WriteOption a function option to pass to Write() +type WriteOption func(*writeOptions) error +type writeOptions struct { + updates chan<- v1.Update +} + +// WithProgress create a WriteOption for passing to Write() that enables +// a channel to receive updates as they are downloaded and written to disk. +func WithProgress(updates chan<- v1.Update) WriteOption { + return func(o *writeOptions) error { + o.updates = updates + return nil + } +} + +// progressWriter is a writer which will send the download progress +type progressWriter struct { + w io.Writer + updates chan<- v1.Update + size, complete int64 +} + +func (pw *progressWriter) Write(p []byte) (int, error) { + n, err := pw.w.Write(p) + if err != nil { + return n, err + } + + pw.complete += int64(n) + + pw.updates <- v1.Update{ + Total: pw.size, + Complete: pw.complete, + } + + return n, err +} + +func (pw *progressWriter) Error(err error) error { + pw.updates <- v1.Update{ + Total: pw.size, + Complete: pw.complete, + Error: err, + } + return err +} + +func (pw *progressWriter) Close() error { + pw.updates <- v1.Update{ + Total: pw.size, + Complete: pw.complete, + Error: io.EOF, + } + return io.EOF +} + +// calculateSingleFileInTarSize calculate the size a file will take up in a tar archive, +// given the input data. Provided by rounding up to nearest whole block (512) +// and adding header 512 +func calculateSingleFileInTarSize(in int64) (out int64) { + // doing this manually, because math.Round() works with float64 + out += in + if remainder := out % 512; remainder != 0 { + out += (512 - remainder) + } + out += 512 + return out +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go new file mode 100644 index 00000000..363e989f --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go @@ -0,0 +1,73 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types holds common OCI media types. 
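+//
+// A sketch of typical use, dispatching on a v1.Descriptor's media type:
+//
+//	switch {
+//	case desc.MediaType.IsIndex():
+//		// recurse into child manifests
+//	case desc.MediaType.IsImage():
+//		// fetch config and layers
+//	}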
+package types + +// MediaType is an enumeration of the supported mime types that an element of an image might have. +type MediaType string + +// The collection of known MediaType values. +const ( + OCIContentDescriptor MediaType = "application/vnd.oci.descriptor.v1+json" + OCIImageIndex MediaType = "application/vnd.oci.image.index.v1+json" + OCIManifestSchema1 MediaType = "application/vnd.oci.image.manifest.v1+json" + OCIConfigJSON MediaType = "application/vnd.oci.image.config.v1+json" + OCILayer MediaType = "application/vnd.oci.image.layer.v1.tar+gzip" + OCILayerZStd MediaType = "application/vnd.oci.image.layer.v1.tar+zstd" + OCIRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + OCIUncompressedLayer MediaType = "application/vnd.oci.image.layer.v1.tar" + OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + DockerManifestSchema1 MediaType = "application/vnd.docker.distribution.manifest.v1+json" + DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" + DockerManifestSchema2 MediaType = "application/vnd.docker.distribution.manifest.v2+json" + DockerManifestList MediaType = "application/vnd.docker.distribution.manifest.list.v2+json" + DockerLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" + DockerConfigJSON MediaType = "application/vnd.docker.container.image.v1+json" + DockerPluginConfig MediaType = "application/vnd.docker.plugin.v1+json" + DockerForeignLayer MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + DockerUncompressedLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar" + + OCIVendorPrefix = "vnd.oci" + DockerVendorPrefix = "vnd.docker" +) + +// IsDistributable returns true if a layer is distributable, see: +// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers +func (m MediaType) IsDistributable() bool { + switch m { + case DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: + return false + } + return true +} + +// IsImage returns true if the mediaType represents an image manifest, as opposed to something else, like an index. +func (m MediaType) IsImage() bool { + switch m { + case OCIManifestSchema1, DockerManifestSchema2: + return true + } + return false +} + +// IsIndex returns true if the mediaType represents an index, as opposed to something else, like an image. +func (m MediaType) IsIndex() bool { + switch m { + case OCIImageIndex, DockerManifestList: + return true + } + return false +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go new file mode 100644 index 00000000..0cb1586f --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go @@ -0,0 +1,324 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = new(HealthConfig) + (*in).DeepCopyInto(*out) + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Shell != nil { + in, out := &in.Shell, &out.Shell + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigFile) DeepCopyInto(out *ConfigFile) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + if in.History != nil { + in, out := &in.History, &out.History + *out = make([]History, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.RootFS.DeepCopyInto(&out.RootFS) + in.Config.DeepCopyInto(&out.Config) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile. +func (in *ConfigFile) DeepCopy() *ConfigFile { + if in == nil { + return nil + } + out := new(ConfigFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Descriptor) DeepCopyInto(out *Descriptor) { + *out = *in + out.Digest = in.Digest + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(Platform) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor. 
+func (in *Descriptor) DeepCopy() *Descriptor { + if in == nil { + return nil + } + out := new(Descriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hash) DeepCopyInto(out *Hash) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash. +func (in *Hash) DeepCopy() *Hash { + if in == nil { + return nil + } + out := new(Hash) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthConfig) DeepCopyInto(out *HealthConfig) { + *out = *in + if in.Test != nil { + in, out := &in.Test, &out.Test + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig. +func (in *HealthConfig) DeepCopy() *HealthConfig { + if in == nil { + return nil + } + out := new(HealthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *History) DeepCopyInto(out *History) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History. +func (in *History) DeepCopy() *History { + if in == nil { + return nil + } + out := new(History) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManifest) DeepCopyInto(out *IndexManifest) { + *out = *in + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest. +func (in *IndexManifest) DeepCopy() *IndexManifest { + if in == nil { + return nil + } + out := new(IndexManifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Manifest) DeepCopyInto(out *Manifest) { + *out = *in + in.Config.DeepCopyInto(&out.Config) + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest. +func (in *Manifest) DeepCopy() *Manifest { + if in == nil { + return nil + } + out := new(Manifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.OSFeatures != nil { + in, out := &in.OSFeatures, &out.OSFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootFS) DeepCopyInto(out *RootFS) { + *out = *in + if in.DiffIDs != nil { + in, out := &in.DiffIDs, &out.DiffIDs + *out = make([]Hash, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS. +func (in *RootFS) DeepCopy() *RootFS { + if in == nil { + return nil + } + out := new(RootFS) + in.DeepCopyInto(out) + return out +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. +func (in *Time) DeepCopy() *Time { + if in == nil { + return nil + } + out := new(Time) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/google/s2a-go/.gitignore b/vendor/github.com/google/s2a-go/.gitignore new file mode 100644 index 00000000..01764d1c --- /dev/null +++ b/vendor/github.com/google/s2a-go/.gitignore @@ -0,0 +1,6 @@ +# Ignore binaries without extension +//example/client/client +//example/server/server +//internal/v2/fakes2av2_server/fakes2av2_server + +.idea/ \ No newline at end of file diff --git a/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md b/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..dc079b4d --- /dev/null +++ b/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md @@ -0,0 +1,93 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. 
+ +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when the Project +Steward has a reasonable belief that an individual's behavior may have a +negative impact on the project or its community. + +## Conflict Resolution + +We do not believe that all conflict is bad; healthy debate and disagreement +often yield positive results. However, it is never okay to be disrespectful or +to engage in behavior that violates the project’s code of conduct. + +If you see someone violating the code of conduct, you are encouraged to address +the behavior directly with those involved. Many issues can be resolved quickly +and easily, and this gives people more control over the outcome of their +dispute. If you are unable to resolve the matter for any reason, or if the +behavior is threatening or harassing, report it. We are dedicated to providing +an environment where participants feel welcome and safe. + +Reports should be directed to *[PROJECT STEWARD NAME(s) AND EMAIL(s)]*, the +Project Steward(s) for *[PROJECT NAME]*. It is the Project Steward’s duty to +receive and address reported violations of the code of conduct. They will then +work with a committee consisting of representatives from the Open Source +Programs Office and the Google Open Source Strategy team. If for any reason you +are uncomfortable reaching out to the Project Steward, please email +opensource@google.com. + +We will investigate every complaint, but you may not receive a direct response. +We will use our discretion in determining when and how to follow up on reported +incidents, which may range from not taking action to permanent expulsion from +the project and project-sponsored spaces. We will notify the accused of the +report and provide them an opportunity to discuss it before any action is taken. +The identity of the reporter will be omitted from the details of the report +supplied to the accused. In potentially harmful situations, such as ongoing +harassment or threats to anyone's safety, we may take action without notice. + +## Attribution + +This Code of Conduct is adapted from the Contributor Covenant, version 1.4, +available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html diff --git a/vendor/github.com/google/s2a-go/CONTRIBUTING.md b/vendor/github.com/google/s2a-go/CONTRIBUTING.md new file mode 100644 index 00000000..22b241cb --- /dev/null +++ b/vendor/github.com/google/s2a-go/CONTRIBUTING.md @@ -0,0 +1,29 @@ +# How to Contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement (CLA). 
You (or your employer) retain the copyright to your
+contribution; this simply gives us permission to use and redistribute your
+contributions as part of the project. Head over to
+<https://cla.developers.google.com/> to see your current agreements on file or
+to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## Community Guidelines
+
+This project follows
+[Google's Open Source Community Guidelines](https://opensource.google/conduct/).
diff --git a/vendor/github.com/google/s2a-go/LICENSE.md b/vendor/github.com/google/s2a-go/LICENSE.md
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/google/s2a-go/LICENSE.md
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/s2a-go/README.md b/vendor/github.com/google/s2a-go/README.md new file mode 100644 index 00000000..d566950f --- /dev/null +++ b/vendor/github.com/google/s2a-go/README.md @@ -0,0 +1,17 @@ +# Secure Session Agent Client Libraries + +The Secure Session Agent is a service that enables a workload to offload select +operations from the mTLS handshake and protects a workload's private key +material from exfiltration. Specifically, the workload asks the Secure Session +Agent for the TLS configuration to use during the handshake, to perform private +key operations, and to validate the peer certificate chain. The Secure Session +Agent's client libraries enable applications to communicate with the Secure +Session Agent during the TLS handshake, and to encrypt traffic to the peer +after the TLS handshake is complete. 
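A minimal sketch (not part of the vendored README) of wiring these client libraries into a gRPC-Go dial. It assumes the module's top-level s2a package, whose ClientOptions/S2AAddress also appear later in this diff's fallback docs; the S2A address and target below are placeholders.

package main

import (
	"log"

	"github.com/google/s2a-go"
	grpc "google.golang.org/grpc"
)

func main() {
	// Ask the Secure Session Agent (listening at S2AAddress) to drive the
	// TLS handshake for this client connection.
	creds, err := s2a.NewClientCreds(&s2a.ClientOptions{
		S2AAddress: "unix:///tmp/s2a.sock", // hypothetical agent address
	})
	if err != nil {
		log.Fatalf("NewClientCreds: %v", err)
	}
	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}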
+
+This repository contains the source code for the Secure Session Agent's Go
+client libraries, which allow gRPC-Go applications to use the Secure Session
+Agent. This repository supports the Bazel and Golang build systems.
+
+All code in this repository is experimental and subject to change. We do not
+guarantee API stability at this time.
diff --git a/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go b/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go
new file mode 100644
index 00000000..034d1b91
--- /dev/null
+++ b/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package fallback provides default implementations of fallback options when S2A fails.
+package fallback
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "net"
+
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+)
+
+const (
+ alpnProtoStrH2 = "h2"
+ alpnProtoStrHTTP = "http/1.1"
+ defaultHTTPSPort = "443"
+)
+
+// FallbackTLSConfigGRPC is a tls.Config used by the DefaultFallbackClientHandshakeFunc function.
+// It supports the gRPC use case, so the ALPN is set to 'h2'.
+var FallbackTLSConfigGRPC = tls.Config{
+ MinVersion: tls.VersionTLS13,
+ ClientSessionCache: nil,
+ NextProtos: []string{alpnProtoStrH2},
+}
+
+// FallbackTLSConfigHTTP is a tls.Config used by the DefaultFallbackDialerAndAddress func.
+// It supports the HTTP use case, so the ALPN is set to both 'http/1.1' and 'h2'.
+var FallbackTLSConfigHTTP = tls.Config{
+ MinVersion: tls.VersionTLS13,
+ ClientSessionCache: nil,
+ NextProtos: []string{alpnProtoStrH2, alpnProtoStrHTTP},
+}
+
+// ClientHandshake establishes a TLS connection and returns it, plus its auth info.
+// Inputs:
+//
+// targetServer: the server attempted with S2A.
+// conn: the TCP connection to the server at address targetServer that was passed into S2A's ClientHandshake func.
+// If fallback is successful, the `conn` should be closed.
+// err: the error encountered when performing the client-side TLS handshake with S2A.
+type ClientHandshake func(ctx context.Context, targetServer string, conn net.Conn, err error) (net.Conn, credentials.AuthInfo, error)
+
+// DefaultFallbackClientHandshakeFunc returns a ClientHandshake function,
+// which establishes a TLS connection to the provided fallbackAddr, returns the new connection and its auth info.
+// Example use:
+//
+// transportCreds, _ = s2a.NewClientCreds(&s2a.ClientOptions{
+// S2AAddress: s2aAddress,
+// FallbackOpts: &s2a.FallbackOptions{ // optional
+// FallbackClientHandshakeFunc: fallback.DefaultFallbackClientHandshakeFunc(fallbackAddr),
+// },
+// })
+//
+// The fallback server's certificate must be verifiable using the OS root store.
+// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified,
+// it uses the default port 443.
+// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, +// and min TLS version is set to 1.3. +func DefaultFallbackClientHandshakeFunc(fallbackAddr string) (ClientHandshake, error) { + var fallbackDialer = tls.Dialer{Config: &FallbackTLSConfigGRPC} + return defaultFallbackClientHandshakeFuncInternal(fallbackAddr, fallbackDialer.DialContext) +} + +func defaultFallbackClientHandshakeFuncInternal(fallbackAddr string, dialContextFunc func(context.Context, string, string) (net.Conn, error)) (ClientHandshake, error) { + fallbackServerAddr, err := processFallbackAddr(fallbackAddr) + if err != nil { + if grpclog.V(1) { + grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) + } + return nil, err + } + return func(ctx context.Context, targetServer string, conn net.Conn, s2aErr error) (net.Conn, credentials.AuthInfo, error) { + fbConn, fbErr := dialContextFunc(ctx, "tcp", fallbackServerAddr) + if fbErr != nil { + grpclog.Infof("dialing to fallback server %s failed: %v", fallbackServerAddr, fbErr) + return nil, nil, fmt.Errorf("dialing to fallback server %s failed: %v; S2A client handshake with %s error: %w", fallbackServerAddr, fbErr, targetServer, s2aErr) + } + + tc, success := fbConn.(*tls.Conn) + if !success { + grpclog.Infof("the connection with fallback server is expected to be tls but isn't") + return nil, nil, fmt.Errorf("the connection with fallback server is expected to be tls but isn't; S2A client handshake with %s error: %w", targetServer, s2aErr) + } + + tlsInfo := credentials.TLSInfo{ + State: tc.ConnectionState(), + CommonAuthInfo: credentials.CommonAuthInfo{ + SecurityLevel: credentials.PrivacyAndIntegrity, + }, + } + if grpclog.V(1) { + grpclog.Infof("ConnectionState.NegotiatedProtocol: %v", tc.ConnectionState().NegotiatedProtocol) + grpclog.Infof("ConnectionState.HandshakeComplete: %v", tc.ConnectionState().HandshakeComplete) + grpclog.Infof("ConnectionState.ServerName: %v", tc.ConnectionState().ServerName) + } + conn.Close() + return fbConn, tlsInfo, nil + }, nil +} + +// DefaultFallbackDialerAndAddress returns a TLS dialer and the network address to dial. +// Example use: +// +// fallbackDialer, fallbackServerAddr := fallback.DefaultFallbackDialerAndAddress(fallbackAddr) +// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{ +// S2AAddress: s2aAddress, // required +// FallbackOpts: &s2a.FallbackOptions{ +// FallbackDialer: &s2a.FallbackDialer{ +// Dialer: fallbackDialer, +// ServerAddr: fallbackServerAddr, +// }, +// }, +// }) +// +// The fallback server's certificate should be verifiable using OS root store. +// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified, +// it uses default port 443. +// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption, +// and min TLS version is set to 1.3. 
+func DefaultFallbackDialerAndAddress(fallbackAddr string) (*tls.Dialer, string, error) { + fallbackServerAddr, err := processFallbackAddr(fallbackAddr) + if err != nil { + if grpclog.V(1) { + grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err) + } + return nil, "", err + } + return &tls.Dialer{Config: &FallbackTLSConfigHTTP}, fallbackServerAddr, nil +} + +func processFallbackAddr(fallbackAddr string) (string, error) { + var fallbackServerAddr string + var err error + + if fallbackAddr == "" { + return "", fmt.Errorf("empty fallback address") + } + _, _, err = net.SplitHostPort(fallbackAddr) + if err != nil { + // fallbackAddr does not have port suffix + fallbackServerAddr = net.JoinHostPort(fallbackAddr, defaultHTTPSPort) + } else { + // FallbackServerAddr already has port suffix + fallbackServerAddr = fallbackAddr + } + return fallbackServerAddr, nil +} diff --git a/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go b/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go new file mode 100644 index 00000000..aa3967f9 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package authinfo provides authentication and authorization information that +// results from the TLS handshake. +package authinfo + +import ( + "errors" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + contextpb "github.com/google/s2a-go/internal/proto/s2a_context_go_proto" + grpcpb "github.com/google/s2a-go/internal/proto/s2a_go_proto" + "google.golang.org/grpc/credentials" +) + +var _ credentials.AuthInfo = (*S2AAuthInfo)(nil) + +const s2aAuthType = "s2a" + +// S2AAuthInfo exposes authentication and authorization information from the +// S2A session result to the gRPC stack. +type S2AAuthInfo struct { + s2aContext *contextpb.S2AContext + commonAuthInfo credentials.CommonAuthInfo +} + +// NewS2AAuthInfo returns a new S2AAuthInfo object from the S2A session result. +func NewS2AAuthInfo(result *grpcpb.SessionResult) (credentials.AuthInfo, error) { + return newS2AAuthInfo(result) +} + +func newS2AAuthInfo(result *grpcpb.SessionResult) (*S2AAuthInfo, error) { + if result == nil { + return nil, errors.New("NewS2aAuthInfo given nil session result") + } + return &S2AAuthInfo{ + s2aContext: &contextpb.S2AContext{ + ApplicationProtocol: result.GetApplicationProtocol(), + TlsVersion: result.GetState().GetTlsVersion(), + Ciphersuite: result.GetState().GetTlsCiphersuite(), + PeerIdentity: result.GetPeerIdentity(), + LocalIdentity: result.GetLocalIdentity(), + PeerCertFingerprint: result.GetPeerCertFingerprint(), + LocalCertFingerprint: result.GetLocalCertFingerprint(), + IsHandshakeResumed: result.GetState().GetIsHandshakeResumed(), + }, + commonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}, + }, nil +} + +// AuthType returns the authentication type. 
+func (s *S2AAuthInfo) AuthType() string {
+ return s2aAuthType
+}
+
+// ApplicationProtocol returns the application protocol, e.g. "grpc".
+func (s *S2AAuthInfo) ApplicationProtocol() string {
+ return s.s2aContext.GetApplicationProtocol()
+}
+
+// TLSVersion returns the TLS version negotiated during the handshake.
+func (s *S2AAuthInfo) TLSVersion() commonpb.TLSVersion {
+ return s.s2aContext.GetTlsVersion()
+}
+
+// Ciphersuite returns the ciphersuite negotiated during the handshake.
+func (s *S2AAuthInfo) Ciphersuite() commonpb.Ciphersuite {
+ return s.s2aContext.GetCiphersuite()
+}
+
+// PeerIdentity returns the authenticated identity of the peer.
+func (s *S2AAuthInfo) PeerIdentity() *commonpb.Identity {
+ return s.s2aContext.GetPeerIdentity()
+}
+
+// LocalIdentity returns the local identity of the application used during
+// session setup.
+func (s *S2AAuthInfo) LocalIdentity() *commonpb.Identity {
+ return s.s2aContext.GetLocalIdentity()
+}
+
+// PeerCertFingerprint returns the SHA256 hash of the peer certificate used in
+// the S2A handshake.
+func (s *S2AAuthInfo) PeerCertFingerprint() []byte {
+ return s.s2aContext.GetPeerCertFingerprint()
+}
+
+// LocalCertFingerprint returns the SHA256 hash of the local certificate used
+// in the S2A handshake.
+func (s *S2AAuthInfo) LocalCertFingerprint() []byte {
+ return s.s2aContext.GetLocalCertFingerprint()
+}
+
+// IsHandshakeResumed returns true if a cached session was used to resume
+// the handshake.
+func (s *S2AAuthInfo) IsHandshakeResumed() bool {
+ return s.s2aContext.GetIsHandshakeResumed()
+}
+
+// SecurityLevel returns the security level of the connection.
+func (s *S2AAuthInfo) SecurityLevel() credentials.SecurityLevel {
+ return s.commonAuthInfo.SecurityLevel
+}
diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go b/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go
new file mode 100644
index 00000000..8297c9a9
--- /dev/null
+++ b/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go
@@ -0,0 +1,438 @@
+/*
+ *
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package handshaker communicates with the S2A handshaker service.
+package handshaker
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sync"
+
+ "github.com/google/s2a-go/internal/authinfo"
+ commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
+ s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto"
+ "github.com/google/s2a-go/internal/record"
+ "github.com/google/s2a-go/internal/tokenmanager"
+ grpc "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+)
+
+var (
+ // appProtocol contains the application protocol accepted by the handshaker.
+ appProtocol = "grpc"
+ // frameLimit is the maximum size of a frame in bytes.
+ frameLimit = 1024 * 64
+ // errPeerNotResponding is the error returned when the peer doesn't respond.
+ errPeerNotResponding = errors.New("peer is not responding and re-connection should be attempted") +) + +// Handshaker defines a handshaker interface. +type Handshaker interface { + // ClientHandshake starts and completes a TLS handshake from the client side, + // and returns a secure connection along with additional auth information. + ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // ServerHandshake starts and completes a TLS handshake from the server side, + // and returns a secure connection along with additional auth information. + ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) + // Close terminates the Handshaker. It should be called when the handshake + // is complete. + Close() error +} + +// ClientHandshakerOptions contains the options needed to configure the S2A +// handshaker service on the client-side. +type ClientHandshakerOptions struct { + // MinTLSVersion specifies the min TLS version supported by the client. + MinTLSVersion commonpb.TLSVersion + // MaxTLSVersion specifies the max TLS version supported by the client. + MaxTLSVersion commonpb.TLSVersion + // TLSCiphersuites is the ordered list of ciphersuites supported by the + // client. + TLSCiphersuites []commonpb.Ciphersuite + // TargetIdentities contains a list of allowed server identities. One of the + // target identities should match the peer identity in the handshake + // result; otherwise, the handshake fails. + TargetIdentities []*commonpb.Identity + // LocalIdentity is the local identity of the client application. If none is + // provided, then the S2A will choose the default identity. + LocalIdentity *commonpb.Identity + // TargetName is the allowed server name, which may be used for server + // authorization check by the S2A if it is provided. + TargetName string + // EnsureProcessSessionTickets allows users to wait and ensure that all + // available session tickets are sent to S2A before a process completes. + EnsureProcessSessionTickets *sync.WaitGroup +} + +// ServerHandshakerOptions contains the options needed to configure the S2A +// handshaker service on the server-side. +type ServerHandshakerOptions struct { + // MinTLSVersion specifies the min TLS version supported by the server. + MinTLSVersion commonpb.TLSVersion + // MaxTLSVersion specifies the max TLS version supported by the server. + MaxTLSVersion commonpb.TLSVersion + // TLSCiphersuites is the ordered list of ciphersuites supported by the + // server. + TLSCiphersuites []commonpb.Ciphersuite + // LocalIdentities is the list of local identities that may be assumed by + // the server. If no local identity is specified, then the S2A chooses a + // default local identity. + LocalIdentities []*commonpb.Identity +} + +// s2aHandshaker performs a TLS handshake using the S2A handshaker service. +type s2aHandshaker struct { + // stream is used to communicate with the S2A handshaker service. + stream s2apb.S2AService_SetUpSessionClient + // conn is the connection to the peer. + conn net.Conn + // clientOpts should be non-nil iff the handshaker is client-side. + clientOpts *ClientHandshakerOptions + // serverOpts should be non-nil iff the handshaker is server-side. + serverOpts *ServerHandshakerOptions + // isClient determines if the handshaker is client or server side. + isClient bool + // hsAddr stores the address of the S2A handshaker service. + hsAddr string + // tokenManager manages access tokens for authenticating to S2A. 
+ tokenManager tokenmanager.AccessTokenManager + // localIdentities is the set of local identities for whom the + // tokenManager should fetch a token when preparing a request to be + // sent to S2A. + localIdentities []*commonpb.Identity +} + +// NewClientHandshaker creates an s2aHandshaker instance that performs a +// client-side TLS handshake using the S2A handshaker service. +func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ClientHandshakerOptions) (Handshaker, error) { + stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + return newClientHandshaker(stream, c, hsAddr, opts, tokenManager), nil +} + +func newClientHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ClientHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { + var localIdentities []*commonpb.Identity + if opts != nil { + localIdentities = []*commonpb.Identity{opts.LocalIdentity} + } + return &s2aHandshaker{ + stream: stream, + conn: c, + clientOpts: opts, + isClient: true, + hsAddr: hsAddr, + tokenManager: tokenManager, + localIdentities: localIdentities, + } +} + +// NewServerHandshaker creates an s2aHandshaker instance that performs a +// server-side TLS handshake using the S2A handshaker service. +func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ServerHandshakerOptions) (Handshaker, error) { + stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true)) + if err != nil { + return nil, err + } + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + return newServerHandshaker(stream, c, hsAddr, opts, tokenManager), nil +} + +func newServerHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ServerHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker { + var localIdentities []*commonpb.Identity + if opts != nil { + localIdentities = opts.LocalIdentities + } + return &s2aHandshaker{ + stream: stream, + conn: c, + serverOpts: opts, + isClient: false, + hsAddr: hsAddr, + tokenManager: tokenManager, + localIdentities: localIdentities, + } +} + +// ClientHandshake performs a client-side TLS handshake using the S2A handshaker +// service. When complete, returns a TLS connection. +func (h *s2aHandshaker) ClientHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { + if !h.isClient { + return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client-side handshake") + } + // Extract the hostname from the target name. The target name is assumed to be an authority. + hostname, _, err := net.SplitHostPort(h.clientOpts.TargetName) + if err != nil { + // If the target name had no host port or could not be parsed, use it as is. + hostname = h.clientOpts.TargetName + } + + // Prepare a client start message to send to the S2A handshaker service. 
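+ // The ClientStart message carries the client's TLS constraints and
+ // identities. S2A replies with OutFrames, raw TLS handshake bytes that
+ // setUpSession and processUntilDone below write to the peer connection,
+ // proxying frames back and forth until a SessionResult is produced.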
+ req := &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ClientStart{ + ClientStart: &s2apb.ClientSessionStartReq{ + ApplicationProtocols: []string{appProtocol}, + MinTlsVersion: h.clientOpts.MinTLSVersion, + MaxTlsVersion: h.clientOpts.MaxTLSVersion, + TlsCiphersuites: h.clientOpts.TLSCiphersuites, + TargetIdentities: h.clientOpts.TargetIdentities, + LocalIdentity: h.clientOpts.LocalIdentity, + TargetName: hostname, + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + } + conn, result, err := h.setUpSession(req) + if err != nil { + return nil, nil, err + } + authInfo, err := authinfo.NewS2AAuthInfo(result) + if err != nil { + return nil, nil, err + } + return conn, authInfo, nil +} + +// ServerHandshake performs a server-side TLS handshake using the S2A handshaker +// service. When complete, returns a TLS connection. +func (h *s2aHandshaker) ServerHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) { + if h.isClient { + return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server-side handshake") + } + p := make([]byte, frameLimit) + n, err := h.conn.Read(p) + if err != nil { + return nil, nil, err + } + // Prepare a server start message to send to the S2A handshaker service. + req := &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ServerStart{ + ServerStart: &s2apb.ServerSessionStartReq{ + ApplicationProtocols: []string{appProtocol}, + MinTlsVersion: h.serverOpts.MinTLSVersion, + MaxTlsVersion: h.serverOpts.MaxTLSVersion, + TlsCiphersuites: h.serverOpts.TLSCiphersuites, + LocalIdentities: h.serverOpts.LocalIdentities, + InBytes: p[:n], + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + } + conn, result, err := h.setUpSession(req) + if err != nil { + return nil, nil, err + } + authInfo, err := authinfo.NewS2AAuthInfo(result) + if err != nil { + return nil, nil, err + } + return conn, authInfo, nil +} + +// setUpSession proxies messages between the peer and the S2A handshaker +// service. +func (h *s2aHandshaker) setUpSession(req *s2apb.SessionReq) (net.Conn, *s2apb.SessionResult, error) { + resp, err := h.accessHandshakerService(req) + if err != nil { + return nil, nil, err + } + // Check if the returned status is an error. + if resp.GetStatus() != nil { + if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want { + return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details) + } + } + // Calculate the extra unread bytes from the Session. Attempting to consume + // more than the bytes sent will throw an error. + var extra []byte + if req.GetServerStart() != nil { + if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) { + return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") + } + extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():] + } + result, extra, err := h.processUntilDone(resp, extra) + if err != nil { + return nil, nil, err + } + if result.GetLocalIdentity() == nil { + return nil, nil, errors.New("local identity must be populated in session result") + } + + // Create a new TLS record protocol using the Session Result. 
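+ // The record protocol wraps the raw connection and protects application
+ // data with the negotiated ciphersuite, traffic secrets, and sequence
+ // numbers from the session state; leftover handshake bytes are handed
+ // over as UnusedBuf so no peer data is lost.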
+ newConn, err := record.NewConn(&record.ConnParameters{ + NetConn: h.conn, + Ciphersuite: result.GetState().GetTlsCiphersuite(), + TLSVersion: result.GetState().GetTlsVersion(), + InTrafficSecret: result.GetState().GetInKey(), + OutTrafficSecret: result.GetState().GetOutKey(), + UnusedBuf: extra, + InSequence: result.GetState().GetInSequence(), + OutSequence: result.GetState().GetOutSequence(), + HSAddr: h.hsAddr, + ConnectionID: result.GetState().GetConnectionId(), + LocalIdentity: result.GetLocalIdentity(), + EnsureProcessSessionTickets: h.ensureProcessSessionTickets(), + }) + if err != nil { + return nil, nil, err + } + return newConn, result, nil +} + +func (h *s2aHandshaker) ensureProcessSessionTickets() *sync.WaitGroup { + if h.clientOpts == nil { + return nil + } + return h.clientOpts.EnsureProcessSessionTickets +} + +// accessHandshakerService sends the session request to the S2A handshaker +// service and returns the session response. +func (h *s2aHandshaker) accessHandshakerService(req *s2apb.SessionReq) (*s2apb.SessionResp, error) { + if err := h.stream.Send(req); err != nil { + return nil, err + } + resp, err := h.stream.Recv() + if err != nil { + return nil, err + } + return resp, nil +} + +// processUntilDone continues proxying messages between the peer and the S2A +// handshaker service until the handshaker service returns the SessionResult at +// the end of the handshake or an error occurs. +func (h *s2aHandshaker) processUntilDone(resp *s2apb.SessionResp, unusedBytes []byte) (*s2apb.SessionResult, []byte, error) { + for { + if len(resp.OutFrames) > 0 { + if _, err := h.conn.Write(resp.OutFrames); err != nil { + return nil, nil, err + } + } + if resp.Result != nil { + return resp.Result, unusedBytes, nil + } + buf := make([]byte, frameLimit) + n, err := h.conn.Read(buf) + if err != nil && err != io.EOF { + return nil, nil, err + } + // If there is nothing to send to the handshaker service and nothing is + // received from the peer, then we are stuck. This covers the case when + // the peer is not responding. Note that handshaker service connection + // issues are caught in accessHandshakerService before we even get + // here. + if len(resp.OutFrames) == 0 && n == 0 { + return nil, nil, errPeerNotResponding + } + // Append extra bytes from the previous interaction with the handshaker + // service with the current buffer read from conn. + p := append(unusedBytes, buf[:n]...) + // From here on, p and unusedBytes point to the same slice. + resp, err = h.accessHandshakerService(&s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_Next{ + Next: &s2apb.SessionNextReq{ + InBytes: p, + }, + }, + AuthMechanisms: h.getAuthMechanisms(), + }) + if err != nil { + return nil, nil, err + } + + // Cache the local identity returned by S2A, if it is populated. This + // overwrites any existing local identities. This is done because, once the + // S2A has selected a local identity, then only that local identity should + // be asserted in future requests until the end of the current handshake. + if resp.GetLocalIdentity() != nil { + h.localIdentities = []*commonpb.Identity{resp.GetLocalIdentity()} + } + + // Set unusedBytes based on the handshaker service response. + if resp.GetBytesConsumed() > uint32(len(p)) { + return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds") + } + unusedBytes = p[resp.GetBytesConsumed():] + } +} + +// Close shuts down the handshaker and the stream to the S2A handshaker service +// when the handshake is complete. 
It should be called when the caller obtains
+// the secure connection at the end of the handshake.
+func (h *s2aHandshaker) Close() error {
+ return h.stream.CloseSend()
+}
+
+func (h *s2aHandshaker) getAuthMechanisms() []*s2apb.AuthenticationMechanism {
+ if h.tokenManager == nil {
+ return nil
+ }
+ // First handle the special case when no local identities have been provided
+ // by the application. In this case, an AuthenticationMechanism with no local
+ // identity will be sent.
+ if len(h.localIdentities) == 0 {
+ token, err := h.tokenManager.DefaultToken()
+ if err != nil {
+ grpclog.Infof("unable to get token for empty local identity: %v", err)
+ return nil
+ }
+ return []*s2apb.AuthenticationMechanism{
+ {
+ MechanismOneof: &s2apb.AuthenticationMechanism_Token{
+ Token: token,
+ },
+ },
+ }
+ }
+
+ // Next, handle the case where the application (or the S2A) has provided
+ // one or more local identities.
+ var authMechanisms []*s2apb.AuthenticationMechanism
+ for _, localIdentity := range h.localIdentities {
+ token, err := h.tokenManager.Token(localIdentity)
+ if err != nil {
+ grpclog.Infof("unable to get token for local identity %v: %v", localIdentity, err)
+ continue
+ }
+
+ authMechanism := &s2apb.AuthenticationMechanism{
+ Identity: localIdentity,
+ MechanismOneof: &s2apb.AuthenticationMechanism_Token{
+ Token: token,
+ },
+ }
+ authMechanisms = append(authMechanisms, authMechanism)
+ }
+ return authMechanisms
+}
diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go
new file mode 100644
index 00000000..49573af8
--- /dev/null
+++ b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go
@@ -0,0 +1,99 @@
+/*
+ *
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package service is a utility for calling the S2A handshaker service.
+package service
+
+import (
+ "context"
+ "net"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/socket"
+ grpc "google.golang.org/grpc"
+ "google.golang.org/grpc/grpclog"
+)
+
+// enableAppEngineDialerEnv is an environment variable which, if set to "true",
+// enables the opportunistic use of an AppEngine-specific dialer to call S2A.
+const enableAppEngineDialerEnv = "S2A_ENABLE_APP_ENGINE_DIALER"
+
+var (
+ // appEngineDialerHook is an AppEngine-specific dial option that is set
+ // during init time. If nil, then the application is not running on Google
+ // AppEngine.
+ appEngineDialerHook func(context.Context) grpc.DialOption
+ // mu guards hsConnMap and hsDialer.
+ mu sync.Mutex
+ // hsConnMap represents a mapping from an S2A handshaker service address
+ // to a corresponding connection to an S2A handshaker service instance.
+ hsConnMap = make(map[string]*grpc.ClientConn)
+ // hsDialer will be reassigned in tests.
+ hsDialer = grpc.Dial +) + +func init() { + if !appengine.IsAppEngine() && !appengine.IsDevAppServer() { + return + } + appEngineDialerHook = func(ctx context.Context) grpc.DialOption { + return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { + return socket.DialTimeout(ctx, "tcp", addr, timeout) + }) + } +} + +// Dial dials the S2A handshaker service. If a connection has already been +// established, this function returns it. Otherwise, a new connection is +// created. +func Dial(handshakerServiceAddress string) (*grpc.ClientConn, error) { + mu.Lock() + defer mu.Unlock() + + hsConn, ok := hsConnMap[handshakerServiceAddress] + if !ok { + // Create a new connection to the S2A handshaker service. Note that + // this connection stays open until the application is closed. + grpcOpts := []grpc.DialOption{ + grpc.WithInsecure(), + } + if enableAppEngineDialer() && appEngineDialerHook != nil { + if grpclog.V(1) { + grpclog.Info("Using AppEngine-specific dialer to talk to S2A.") + } + grpcOpts = append(grpcOpts, appEngineDialerHook(context.Background())) + } + var err error + hsConn, err = hsDialer(handshakerServiceAddress, grpcOpts...) + if err != nil { + return nil, err + } + hsConnMap[handshakerServiceAddress] = hsConn + } + return hsConn, nil +} + +func enableAppEngineDialer() bool { + if strings.ToLower(os.Getenv(enableAppEngineDialerEnv)) == "true" { + return true + } + return false +} diff --git a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go new file mode 100644 index 00000000..16278a1d --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go @@ -0,0 +1,389 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/common/common.proto + +package common_go_proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The ciphersuites supported by S2A. The name determines the confidentiality, +// and authentication ciphers as well as the hash algorithm used for PRF in +// TLS 1.2 or HKDF in TLS 1.3. Thus, the components of the name are: +// - AEAD -- for encryption and authentication, e.g., AES_128_GCM. +// - Hash algorithm -- used in PRF or HKDF, e.g., SHA256. 
+type Ciphersuite int32 + +const ( + Ciphersuite_AES_128_GCM_SHA256 Ciphersuite = 0 + Ciphersuite_AES_256_GCM_SHA384 Ciphersuite = 1 + Ciphersuite_CHACHA20_POLY1305_SHA256 Ciphersuite = 2 +) + +// Enum value maps for Ciphersuite. +var ( + Ciphersuite_name = map[int32]string{ + 0: "AES_128_GCM_SHA256", + 1: "AES_256_GCM_SHA384", + 2: "CHACHA20_POLY1305_SHA256", + } + Ciphersuite_value = map[string]int32{ + "AES_128_GCM_SHA256": 0, + "AES_256_GCM_SHA384": 1, + "CHACHA20_POLY1305_SHA256": 2, + } +) + +func (x Ciphersuite) Enum() *Ciphersuite { + p := new(Ciphersuite) + *p = x + return p +} + +func (x Ciphersuite) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_common_common_proto_enumTypes[0].Descriptor() +} + +func (Ciphersuite) Type() protoreflect.EnumType { + return &file_internal_proto_common_common_proto_enumTypes[0] +} + +func (x Ciphersuite) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Ciphersuite.Descriptor instead. +func (Ciphersuite) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} +} + +// The TLS versions supported by S2A's handshaker module. +type TLSVersion int32 + +const ( + TLSVersion_TLS1_2 TLSVersion = 0 + TLSVersion_TLS1_3 TLSVersion = 1 +) + +// Enum value maps for TLSVersion. +var ( + TLSVersion_name = map[int32]string{ + 0: "TLS1_2", + 1: "TLS1_3", + } + TLSVersion_value = map[string]int32{ + "TLS1_2": 0, + "TLS1_3": 1, + } +) + +func (x TLSVersion) Enum() *TLSVersion { + p := new(TLSVersion) + *p = x + return p +} + +func (x TLSVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_common_common_proto_enumTypes[1].Descriptor() +} + +func (TLSVersion) Type() protoreflect.EnumType { + return &file_internal_proto_common_common_proto_enumTypes[1] +} + +func (x TLSVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TLSVersion.Descriptor instead. +func (TLSVersion) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{1} +} + +type Identity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to IdentityOneof: + // + // *Identity_SpiffeId + // *Identity_Hostname + // *Identity_Uid + // *Identity_MdbUsername + // *Identity_GaiaId + IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` + // Additional identity-specific attributes. 
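+	// For example (an illustrative value, not prescribed by S2A):
+	//
+	//	id.Attributes = map[string]string{"environment": "prod"}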
+ Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Identity) Reset() { + *x = Identity{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_common_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identity) ProtoMessage() {} + +func (x *Identity) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_common_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identity.ProtoReflect.Descriptor instead. +func (*Identity) Descriptor() ([]byte, []int) { + return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0} +} + +func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof { + if m != nil { + return m.IdentityOneof + } + return nil +} + +func (x *Identity) GetSpiffeId() string { + if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok { + return x.SpiffeId + } + return "" +} + +func (x *Identity) GetHostname() string { + if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok { + return x.Hostname + } + return "" +} + +func (x *Identity) GetUid() string { + if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok { + return x.Uid + } + return "" +} + +func (x *Identity) GetMdbUsername() string { + if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok { + return x.MdbUsername + } + return "" +} + +func (x *Identity) GetGaiaId() string { + if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok { + return x.GaiaId + } + return "" +} + +func (x *Identity) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +type isIdentity_IdentityOneof interface { + isIdentity_IdentityOneof() +} + +type Identity_SpiffeId struct { + // The SPIFFE ID of a connection endpoint. + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"` +} + +type Identity_Hostname struct { + // The hostname of a connection endpoint. + Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"` +} + +type Identity_Uid struct { + // The UID of a connection endpoint. + Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"` +} + +type Identity_MdbUsername struct { + // The MDB username of a connection endpoint. + MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"` +} + +type Identity_GaiaId struct { + // The Gaia ID of a connection endpoint. 
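+	// A oneof member is selected by assigning its wrapper type, for
+	// example (illustrative only):
+	//
+	//	id := &Identity{IdentityOneof: &Identity_GaiaId{GaiaId: "1234"}}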
+ GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"` +} + +func (*Identity_SpiffeId) isIdentity_IdentityOneof() {} + +func (*Identity_Hostname) isIdentity_IdentityOneof() {} + +func (*Identity_Uid) isIdentity_IdentityOneof() {} + +func (*Identity_MdbUsername) isIdentity_IdentityOneof() {} + +func (*Identity_GaiaId) isIdentity_IdentityOneof() {} + +var File_internal_proto_common_common_proto protoreflect.FileDescriptor + +var file_internal_proto_common_common_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68, + 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a, + 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, + 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, + 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, + 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02, + 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a, + 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c, + 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 
0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_common_common_proto_rawDescOnce sync.Once + file_internal_proto_common_common_proto_rawDescData = file_internal_proto_common_common_proto_rawDesc +) + +func file_internal_proto_common_common_proto_rawDescGZIP() []byte { + file_internal_proto_common_common_proto_rawDescOnce.Do(func() { + file_internal_proto_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_common_common_proto_rawDescData) + }) + return file_internal_proto_common_common_proto_rawDescData +} + +var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_proto_common_common_proto_goTypes = []interface{}{ + (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite + (TLSVersion)(0), // 1: s2a.proto.TLSVersion + (*Identity)(nil), // 2: s2a.proto.Identity + nil, // 3: s2a.proto.Identity.AttributesEntry +} +var file_internal_proto_common_common_proto_depIdxs = []int32{ + 3, // 0: s2a.proto.Identity.attributes:type_name -> s2a.proto.Identity.AttributesEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_proto_common_common_proto_init() } +func file_internal_proto_common_common_proto_init() { + if File_internal_proto_common_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Identity_SpiffeId)(nil), + (*Identity_Hostname)(nil), + (*Identity_Uid)(nil), + (*Identity_MdbUsername)(nil), + (*Identity_GaiaId)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_common_common_proto_rawDesc, + NumEnums: 2, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_common_common_proto_goTypes, + DependencyIndexes: file_internal_proto_common_common_proto_depIdxs, + EnumInfos: file_internal_proto_common_common_proto_enumTypes, + MessageInfos: file_internal_proto_common_common_proto_msgTypes, + }.Build() + File_internal_proto_common_common_proto = out.File + file_internal_proto_common_common_proto_rawDesc = nil + file_internal_proto_common_common_proto_goTypes = nil + file_internal_proto_common_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go new file mode 100644 index 00000000..f4f763ae --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go @@ -0,0 +1,267 @@ 
+// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/s2a_context/s2a_context.proto + +package s2a_context_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type S2AContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocol negotiated for this connection, e.g., 'grpc'. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The TLS version number that the S2A's handshaker module used to set up the + // session. + TlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` + // The TLS ciphersuite negotiated by the S2A's handshaker module. + Ciphersuite common_go_proto.Ciphersuite `protobuf:"varint,3,opt,name=ciphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"ciphersuite,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used during session setup. This could be: + // - The local identity that the client specifies in ClientSessionStartReq. + // - One of the local identities that the server specifies in + // ServerSessionStartReq. + // - If neither client or server specifies local identities, the S2A picks the + // default one. In this case, this field will contain that identity. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The SHA256 hash of the peer certificate used in the handshake. + PeerCertFingerprint []byte `protobuf:"bytes,6,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` + // The SHA256 hash of the local certificate used in the handshake. + LocalCertFingerprint []byte `protobuf:"bytes,7,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` + // Set to true if a cached session was reused to resume the handshake. 
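+	// For example (illustrative only):
+	//
+	//	if ctx.GetIsHandshakeResumed() {
+	//		// the session was resumed from a cached session
+	//	}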
+ IsHandshakeResumed bool `protobuf:"varint,8,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` +} + +func (x *S2AContext) Reset() { + *x = S2AContext{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S2AContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S2AContext) ProtoMessage() {} + +func (x *S2AContext) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. +func (*S2AContext) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} +} + +func (x *S2AContext) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *S2AContext) GetTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.TlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *S2AContext) GetCiphersuite() common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuite + } + return common_go_proto.Ciphersuite(0) +} + +func (x *S2AContext) GetPeerIdentity() *common_go_proto.Identity { + if x != nil { + return x.PeerIdentity + } + return nil +} + +func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *S2AContext) GetPeerCertFingerprint() []byte { + if x != nil { + return x.PeerCertFingerprint + } + return nil +} + +func (x *S2AContext) GetLocalCertFingerprint() []byte { + if x != nil { + return x.LocalCertFingerprint + } + return nil +} + +func (x *S2AContext) GetIsHandshakeResumed() bool { + if x != nil { + return x.IsHandshakeResumed + } + return false +} + +var File_internal_proto_s2a_context_s2a_context_proto protoreflect.FileDescriptor + +var file_internal_proto_s2a_context_s2a_context_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x03, + 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x31, 0x0a, 0x14, + 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, + 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 
0x74, 0x6c, 0x73, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, + 0x65, 0x12, 0x38, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, + 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, 0x72, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, + 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, + 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, + 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, + 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, + 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x65, 0x64, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, 0x61, 0x5f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce sync.Once + file_internal_proto_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_s2a_context_s2a_context_proto_rawDesc +) + +func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte { + file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { + file_internal_proto_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_context_s2a_context_proto_rawDescData) + }) + return file_internal_proto_s2a_context_s2a_context_proto_rawDescData +} + +var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{ + (*S2AContext)(nil), // 0: s2a.proto.S2AContext + (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion + (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite + (*common_go_proto.Identity)(nil), // 3: s2a.proto.Identity +} +var 
file_internal_proto_s2a_context_s2a_context_proto_depIdxs = []int32{ + 1, // 0: s2a.proto.S2AContext.tls_version:type_name -> s2a.proto.TLSVersion + 2, // 1: s2a.proto.S2AContext.ciphersuite:type_name -> s2a.proto.Ciphersuite + 3, // 2: s2a.proto.S2AContext.peer_identity:type_name -> s2a.proto.Identity + 3, // 3: s2a.proto.S2AContext.local_identity:type_name -> s2a.proto.Identity + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_internal_proto_s2a_context_s2a_context_proto_init() } +func file_internal_proto_s2a_context_s2a_context_proto_init() { + if File_internal_proto_s2a_context_s2a_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S2AContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_s2a_context_s2a_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_s2a_context_s2a_context_proto_goTypes, + DependencyIndexes: file_internal_proto_s2a_context_s2a_context_proto_depIdxs, + MessageInfos: file_internal_proto_s2a_context_s2a_context_proto_msgTypes, + }.Build() + File_internal_proto_s2a_context_s2a_context_proto = out.File + file_internal_proto_s2a_context_s2a_context_proto_rawDesc = nil + file_internal_proto_s2a_context_s2a_context_proto_goTypes = nil + file_internal_proto_s2a_context_s2a_context_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go new file mode 100644 index 00000000..0a86ebee --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go @@ -0,0 +1,1377 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/s2a/s2a.proto + +package s2a_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AuthenticationMechanism struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // (Optional) Application may specify an identity associated to an + // authentication mechanism. Otherwise, S2A assumes that the authentication + // mechanism is associated with the default identity. If the default identity + // cannot be determined, session setup fails. + Identity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // Types that are assignable to MechanismOneof: + // + // *AuthenticationMechanism_Token + MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` +} + +func (x *AuthenticationMechanism) Reset() { + *x = AuthenticationMechanism{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthenticationMechanism) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthenticationMechanism) ProtoMessage() {} + +func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. +func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { + if m != nil { + return m.MechanismOneof + } + return nil +} + +func (x *AuthenticationMechanism) GetToken() string { + if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { + return x.Token + } + return "" +} + +type isAuthenticationMechanism_MechanismOneof interface { + isAuthenticationMechanism_MechanismOneof() +} + +type AuthenticationMechanism_Token struct { + // A token that the application uses to authenticate itself to the S2A. + Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` +} + +func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} + +type ClientSessionStartReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocols supported by the client, e.g., "grpc". + ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // (Optional) The minimum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the minimum version it supports. + MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` + // (Optional) The maximum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the maximum version it supports. 
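+	// For example, a client that accepts TLS 1.2 through TLS 1.3 might set
+	// (illustrative only):
+	//
+	//	req := &ClientSessionStartReq{
+	//		MinTlsVersion: common_go_proto.TLSVersion_TLS1_2,
+	//		MaxTlsVersion: common_go_proto.TLSVersion_TLS1_3,
+	//	}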
+	MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"`
+	// The TLS ciphersuites that the client is willing to support.
+	TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"`
+	// (Optional) Describes which server identities are acceptable to the client.
+	// If target identities are provided and none of them matches the peer
+	// identity of the server, session setup fails.
+	TargetIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"`
+	// (Optional) Application may specify a local identity. Otherwise, S2A chooses
+	// the default local identity. If the default identity cannot be determined,
+	// session setup fails.
+	LocalIdentity *common_go_proto.Identity `protobuf:"bytes,6,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
+	// The target name that is used by S2A to configure SNI in the TLS handshake.
+	// It is also used to perform the server authorization check, if available.
+	// This check is intended to verify that the peer authenticated identity is
+	// authorized to run a service with the target name.
+	// This field MUST only contain the host portion of the server address. It
+	// MUST NOT contain the scheme or the port number. For example, if the server
+	// address is dns://www.example.com:443, the value of this field should be
+	// set to www.example.com.
+	TargetName string `protobuf:"bytes,7,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"`
+}
+
+func (x *ClientSessionStartReq) Reset() {
+	*x = ClientSessionStartReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ClientSessionStartReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientSessionStartReq) ProtoMessage() {}
+
+func (x *ClientSessionStartReq) ProtoReflect() protoreflect.Message {
+	mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientSessionStartReq.ProtoReflect.Descriptor instead.
+func (*ClientSessionStartReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientSessionStartReq) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +func (x *ClientSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ClientSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ClientSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.TlsCiphersuites + } + return nil +} + +func (x *ClientSessionStartReq) GetTargetIdentities() []*common_go_proto.Identity { + if x != nil { + return x.TargetIdentities + } + return nil +} + +func (x *ClientSessionStartReq) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *ClientSessionStartReq) GetTargetName() string { + if x != nil { + return x.TargetName + } + return "" +} + +type ServerSessionStartReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocols supported by the server, e.g., "grpc". + ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"` + // (Optional) The minimum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the minimum version it supports. + MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"` + // (Optional) The maximum TLS version number that the S2A's handshaker module + // will use to set up the session. If this field is not provided, S2A will use + // the maximum version it supports. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"` + // The TLS ciphersuites that the server is willing to support. + TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"` + // (Optional) A list of local identities supported by the server, if + // specified. Otherwise, S2A chooses the default local identity. If the + // default identity cannot be determined, session setup fails. + LocalIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"` + // The byte representation of the first handshake message received from the + // client peer. It is possible that this first message is split into multiple + // chunks. In this case, the first chunk is sent using this field and the + // following chunks are sent using the in_bytes field of SessionNextReq + // Specifically, if the client peer is using S2A, this field contains the + // bytes in the out_frames field of SessionResp message that the client peer + // received from its S2A after initiating the handshake. 
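+	// For example, if the client's first flight arrives split in two chunks
+	// (illustrative only; chunk1 and chunk2 are placeholders):
+	//
+	//	start := &ServerSessionStartReq{InBytes: chunk1}
+	//	next := &SessionNextReq{InBytes: chunk2}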
+ InBytes []byte `protobuf:"bytes,6,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *ServerSessionStartReq) Reset() { + *x = ServerSessionStartReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ServerSessionStartReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServerSessionStartReq) ProtoMessage() {} + +func (x *ServerSessionStartReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServerSessionStartReq.ProtoReflect.Descriptor instead. +func (*ServerSessionStartReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerSessionStartReq) GetApplicationProtocols() []string { + if x != nil { + return x.ApplicationProtocols + } + return nil +} + +func (x *ServerSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ServerSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *ServerSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.TlsCiphersuites + } + return nil +} + +func (x *ServerSessionStartReq) GetLocalIdentities() []*common_go_proto.Identity { + if x != nil { + return x.LocalIdentities + } + return nil +} + +func (x *ServerSessionStartReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type SessionNextReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The byte representation of session setup, i.e., handshake messages. + // Specifically: + // - All handshake messages sent from the server to the client. + // - All, except for the first, handshake messages sent from the client to + // the server. Note that the first message is communicated to S2A using the + // in_bytes field of ServerSessionStartReq. + // + // If the peer is using S2A, this field contains the bytes in the out_frames + // field of SessionResp message that the peer received from its S2A. + InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *SessionNextReq) Reset() { + *x = SessionNextReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionNextReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionNextReq) ProtoMessage() {} + +func (x *SessionNextReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionNextReq.ProtoReflect.Descriptor instead. 
+func (*SessionNextReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{3} +} + +func (x *SessionNextReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type ResumptionTicketReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The byte representation of a NewSessionTicket message received from the + // server. + InBytes [][]byte `protobuf:"bytes,1,rep,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` + // A connection identifier that was created and sent by S2A at the end of a + // handshake. + ConnectionId uint64 `protobuf:"varint,2,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + // The local identity that was used by S2A during session setup and included + // in |SessionResult|. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` +} + +func (x *ResumptionTicketReq) Reset() { + *x = ResumptionTicketReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResumptionTicketReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResumptionTicketReq) ProtoMessage() {} + +func (x *ResumptionTicketReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResumptionTicketReq.ProtoReflect.Descriptor instead. +func (*ResumptionTicketReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{4} +} + +func (x *ResumptionTicketReq) GetInBytes() [][]byte { + if x != nil { + return x.InBytes + } + return nil +} + +func (x *ResumptionTicketReq) GetConnectionId() uint64 { + if x != nil { + return x.ConnectionId + } + return 0 +} + +func (x *ResumptionTicketReq) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +type SessionReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to ReqOneof: + // + // *SessionReq_ClientStart + // *SessionReq_ServerStart + // *SessionReq_Next + // *SessionReq_ResumptionTicket + ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` + // (Optional) The authentication mechanisms that the client wishes to use to + // authenticate to the S2A, ordered by preference. The S2A will always use the + // first authentication mechanism that appears in the list and is supported by + // the S2A. 
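+	// For example (illustrative only; start and token are placeholders):
+	//
+	//	req := &SessionReq{
+	//		ReqOneof: &SessionReq_ClientStart{ClientStart: start},
+	//		AuthMechanisms: []*AuthenticationMechanism{
+	//			{MechanismOneof: &AuthenticationMechanism_Token{Token: token}},
+	//		},
+	//	}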
+ AuthMechanisms []*AuthenticationMechanism `protobuf:"bytes,5,rep,name=auth_mechanisms,json=authMechanisms,proto3" json:"auth_mechanisms,omitempty"` +} + +func (x *SessionReq) Reset() { + *x = SessionReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionReq) ProtoMessage() {} + +func (x *SessionReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. +func (*SessionReq) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{5} +} + +func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func (x *SessionReq) GetClientStart() *ClientSessionStartReq { + if x, ok := x.GetReqOneof().(*SessionReq_ClientStart); ok { + return x.ClientStart + } + return nil +} + +func (x *SessionReq) GetServerStart() *ServerSessionStartReq { + if x, ok := x.GetReqOneof().(*SessionReq_ServerStart); ok { + return x.ServerStart + } + return nil +} + +func (x *SessionReq) GetNext() *SessionNextReq { + if x, ok := x.GetReqOneof().(*SessionReq_Next); ok { + return x.Next + } + return nil +} + +func (x *SessionReq) GetResumptionTicket() *ResumptionTicketReq { + if x, ok := x.GetReqOneof().(*SessionReq_ResumptionTicket); ok { + return x.ResumptionTicket + } + return nil +} + +func (x *SessionReq) GetAuthMechanisms() []*AuthenticationMechanism { + if x != nil { + return x.AuthMechanisms + } + return nil +} + +type isSessionReq_ReqOneof interface { + isSessionReq_ReqOneof() +} + +type SessionReq_ClientStart struct { + // The client session setup request message. + ClientStart *ClientSessionStartReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"` +} + +type SessionReq_ServerStart struct { + // The server session setup request message. + ServerStart *ServerSessionStartReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"` +} + +type SessionReq_Next struct { + // The next session setup message request message. + Next *SessionNextReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"` +} + +type SessionReq_ResumptionTicket struct { + // The resumption ticket that is received from the server. This message is + // only accepted by S2A if it is running as a client and if it is received + // after session setup is complete. If S2A is running as a server and it + // receives this message, the session is terminated. + ResumptionTicket *ResumptionTicketReq `protobuf:"bytes,4,opt,name=resumption_ticket,json=resumptionTicket,proto3,oneof"` +} + +func (*SessionReq_ClientStart) isSessionReq_ReqOneof() {} + +func (*SessionReq_ServerStart) isSessionReq_ReqOneof() {} + +func (*SessionReq_Next) isSessionReq_ReqOneof() {} + +func (*SessionReq_ResumptionTicket) isSessionReq_ReqOneof() {} + +type SessionState struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The TLS version number that the S2A's handshaker module used to set up the + // session. 
+ TlsVersion common_go_proto.TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"` + // The TLS ciphersuite negotiated by the S2A's handshaker module. + TlsCiphersuite common_go_proto.Ciphersuite `protobuf:"varint,2,opt,name=tls_ciphersuite,json=tlsCiphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuite,omitempty"` + // The sequence number of the next, incoming, TLS record. + InSequence uint64 `protobuf:"varint,3,opt,name=in_sequence,json=inSequence,proto3" json:"in_sequence,omitempty"` + // The sequence number of the next, outgoing, TLS record. + OutSequence uint64 `protobuf:"varint,4,opt,name=out_sequence,json=outSequence,proto3" json:"out_sequence,omitempty"` + // The key for the inbound direction. + InKey []byte `protobuf:"bytes,5,opt,name=in_key,json=inKey,proto3" json:"in_key,omitempty"` + // The key for the outbound direction. + OutKey []byte `protobuf:"bytes,6,opt,name=out_key,json=outKey,proto3" json:"out_key,omitempty"` + // The constant part of the record nonce for the outbound direction. + InFixedNonce []byte `protobuf:"bytes,7,opt,name=in_fixed_nonce,json=inFixedNonce,proto3" json:"in_fixed_nonce,omitempty"` + // The constant part of the record nonce for the inbound direction. + OutFixedNonce []byte `protobuf:"bytes,8,opt,name=out_fixed_nonce,json=outFixedNonce,proto3" json:"out_fixed_nonce,omitempty"` + // A connection identifier that can be provided to S2A to perform operations + // related to this connection. This identifier will be stored by the record + // protocol, and included in the |ResumptionTicketReq| message that is later + // sent back to S2A. This field is set only for client-side connections. + ConnectionId uint64 `protobuf:"varint,9,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + // Set to true if a cached session was reused to do an abbreviated handshake. + IsHandshakeResumed bool `protobuf:"varint,10,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"` +} + +func (x *SessionState) Reset() { + *x = SessionState{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionState) ProtoMessage() {} + +func (x *SessionState) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionState.ProtoReflect.Descriptor instead. 
+func (*SessionState) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{6} +} + +func (x *SessionState) GetTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.TlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *SessionState) GetTlsCiphersuite() common_go_proto.Ciphersuite { + if x != nil { + return x.TlsCiphersuite + } + return common_go_proto.Ciphersuite(0) +} + +func (x *SessionState) GetInSequence() uint64 { + if x != nil { + return x.InSequence + } + return 0 +} + +func (x *SessionState) GetOutSequence() uint64 { + if x != nil { + return x.OutSequence + } + return 0 +} + +func (x *SessionState) GetInKey() []byte { + if x != nil { + return x.InKey + } + return nil +} + +func (x *SessionState) GetOutKey() []byte { + if x != nil { + return x.OutKey + } + return nil +} + +func (x *SessionState) GetInFixedNonce() []byte { + if x != nil { + return x.InFixedNonce + } + return nil +} + +func (x *SessionState) GetOutFixedNonce() []byte { + if x != nil { + return x.OutFixedNonce + } + return nil +} + +func (x *SessionState) GetConnectionId() uint64 { + if x != nil { + return x.ConnectionId + } + return 0 +} + +func (x *SessionState) GetIsHandshakeResumed() bool { + if x != nil { + return x.IsHandshakeResumed + } + return false +} + +type SessionResult struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The application protocol negotiated for this session. + ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"` + // The session state at the end. This state contains all cryptographic + // material required to initialize the record protocol object. + State *SessionState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + // The authenticated identity of the peer. + PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"` + // The local identity used during session setup. This could be: + // - The local identity that the client specifies in ClientSessionStartReq. + // - One of the local identities that the server specifies in + // ServerSessionStartReq. + // - If neither client or server specifies local identities, the S2A picks the + // default one. In this case, this field will contain that identity. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The SHA256 hash of the local certificate used in the handshake. + LocalCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"` + // The SHA256 hash of the peer certificate used in the handshake. 
+ PeerCertFingerprint []byte `protobuf:"bytes,7,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"` +} + +func (x *SessionResult) Reset() { + *x = SessionResult{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionResult) ProtoMessage() {} + +func (x *SessionResult) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionResult.ProtoReflect.Descriptor instead. +func (*SessionResult) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{7} +} + +func (x *SessionResult) GetApplicationProtocol() string { + if x != nil { + return x.ApplicationProtocol + } + return "" +} + +func (x *SessionResult) GetState() *SessionState { + if x != nil { + return x.State + } + return nil +} + +func (x *SessionResult) GetPeerIdentity() *common_go_proto.Identity { + if x != nil { + return x.PeerIdentity + } + return nil +} + +func (x *SessionResult) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionResult) GetLocalCertFingerprint() []byte { + if x != nil { + return x.LocalCertFingerprint + } + return nil +} + +func (x *SessionResult) GetPeerCertFingerprint() []byte { + if x != nil { + return x.PeerCertFingerprint + } + return nil +} + +type SessionStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code that is specific to the application and the implementation + // of S2A, e.g., gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. + Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *SessionStatus) Reset() { + *x = SessionStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionStatus) ProtoMessage() {} + +func (x *SessionStatus) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionStatus.ProtoReflect.Descriptor instead. +func (*SessionStatus) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{8} +} + +func (x *SessionStatus) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *SessionStatus) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +type SessionResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The local identity used during session setup. 
This could be: + // - The local identity that the client specifies in ClientSessionStartReq. + // - One of the local identities that the server specifies in + // ServerSessionStartReq. + // - If neither client or server specifies local identities, the S2A picks the + // default one. In this case, this field will contain that identity. + // + // If the SessionResult is populated, then this must coincide with the local + // identity specified in the SessionResult; otherwise, the handshake must + // fail. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The byte representation of the frames that should be sent to the peer. May + // be empty if nothing needs to be sent to the peer or if in_bytes in the + // SessionReq is incomplete. All bytes in a non-empty out_frames must be sent + // to the peer even if the session setup status is not OK as these frames may + // contain appropriate alerts. + OutFrames []byte `protobuf:"bytes,2,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"` + // Number of bytes in the in_bytes field that are consumed by S2A. It is + // possible that part of in_bytes is unrelated to the session setup process. + BytesConsumed uint32 `protobuf:"varint,3,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"` + // This is set if the session is successfully set up. out_frames may + // still be set to frames that needs to be forwarded to the peer. + Result *SessionResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` + // Status of session setup at the current stage. + Status *SessionStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *SessionResp) Reset() { + *x = SessionResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionResp) ProtoMessage() {} + +func (x *SessionResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead. 
+func (*SessionResp) Descriptor() ([]byte, []int) { + return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{9} +} + +func (x *SessionResp) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionResp) GetOutFrames() []byte { + if x != nil { + return x.OutFrames + } + return nil +} + +func (x *SessionResp) GetBytesConsumed() uint32 { + if x != nil { + return x.BytesConsumed + } + return 0 +} + +func (x *SessionResp) GetResult() *SessionResult { + if x != nil { + return x.Result + } + return nil +} + +func (x *SessionResp) GetStatus() *SessionStatus { + if x != nil { + return x.Status + } + return nil +} + +var File_internal_proto_s2a_s2a_proto protoreflect.FileDescriptor + +var file_internal_proto_s2a_s2a_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x75, 0x0a, + 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xac, 0x03, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, + 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, + 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 
0x75, + 0x69, 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x11, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x22, 0xe8, 0x02, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a, + 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b, + 0x0a, 0x0e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x71, + 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 
0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x13, + 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, + 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, + 0xf4, 0x02, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, + 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x04, + 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, + 0x78, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x12, 0x4d, 0x0a, + 0x11, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b, + 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x10, 0x72, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x4b, 0x0a, 0x0f, + 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x4d, + 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, + 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa0, 0x03, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, + 0x3f, 0x0a, 0x0f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, + 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x69, 0x6e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6f, + 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75, + 0x74, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, + 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, + 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 0x75, + 0x74, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, + 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x22, 0xd1, 0x02, 0x0a, 0x0d, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2d, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a, + 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, + 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, + 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, + 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, + 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, + 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, 0x3d, 0x0a, + 0x0d, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a, + 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3a, 0x0a, 0x0e, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, + 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, + 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x30, + 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x30, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x32, 0x51, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x43, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22, + 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 
0x74, + 0x6f, 0x33, +} + +var ( + file_internal_proto_s2a_s2a_proto_rawDescOnce sync.Once + file_internal_proto_s2a_s2a_proto_rawDescData = file_internal_proto_s2a_s2a_proto_rawDesc +) + +func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte { + file_internal_proto_s2a_s2a_proto_rawDescOnce.Do(func() { + file_internal_proto_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_s2a_proto_rawDescData) + }) + return file_internal_proto_s2a_s2a_proto_rawDescData +} + +var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{ + (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism + (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq + (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq + (*SessionNextReq)(nil), // 3: s2a.proto.SessionNextReq + (*ResumptionTicketReq)(nil), // 4: s2a.proto.ResumptionTicketReq + (*SessionReq)(nil), // 5: s2a.proto.SessionReq + (*SessionState)(nil), // 6: s2a.proto.SessionState + (*SessionResult)(nil), // 7: s2a.proto.SessionResult + (*SessionStatus)(nil), // 8: s2a.proto.SessionStatus + (*SessionResp)(nil), // 9: s2a.proto.SessionResp + (*common_go_proto.Identity)(nil), // 10: s2a.proto.Identity + (common_go_proto.TLSVersion)(0), // 11: s2a.proto.TLSVersion + (common_go_proto.Ciphersuite)(0), // 12: s2a.proto.Ciphersuite +} +var file_internal_proto_s2a_s2a_proto_depIdxs = []int32{ + 10, // 0: s2a.proto.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 11, // 1: s2a.proto.ClientSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion + 11, // 2: s2a.proto.ClientSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion + 12, // 3: s2a.proto.ClientSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite + 10, // 4: s2a.proto.ClientSessionStartReq.target_identities:type_name -> s2a.proto.Identity + 10, // 5: s2a.proto.ClientSessionStartReq.local_identity:type_name -> s2a.proto.Identity + 11, // 6: s2a.proto.ServerSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion + 11, // 7: s2a.proto.ServerSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion + 12, // 8: s2a.proto.ServerSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite + 10, // 9: s2a.proto.ServerSessionStartReq.local_identities:type_name -> s2a.proto.Identity + 10, // 10: s2a.proto.ResumptionTicketReq.local_identity:type_name -> s2a.proto.Identity + 1, // 11: s2a.proto.SessionReq.client_start:type_name -> s2a.proto.ClientSessionStartReq + 2, // 12: s2a.proto.SessionReq.server_start:type_name -> s2a.proto.ServerSessionStartReq + 3, // 13: s2a.proto.SessionReq.next:type_name -> s2a.proto.SessionNextReq + 4, // 14: s2a.proto.SessionReq.resumption_ticket:type_name -> s2a.proto.ResumptionTicketReq + 0, // 15: s2a.proto.SessionReq.auth_mechanisms:type_name -> s2a.proto.AuthenticationMechanism + 11, // 16: s2a.proto.SessionState.tls_version:type_name -> s2a.proto.TLSVersion + 12, // 17: s2a.proto.SessionState.tls_ciphersuite:type_name -> s2a.proto.Ciphersuite + 6, // 18: s2a.proto.SessionResult.state:type_name -> s2a.proto.SessionState + 10, // 19: s2a.proto.SessionResult.peer_identity:type_name -> s2a.proto.Identity + 10, // 20: s2a.proto.SessionResult.local_identity:type_name -> s2a.proto.Identity + 10, // 21: s2a.proto.SessionResp.local_identity:type_name -> s2a.proto.Identity + 7, // 22: s2a.proto.SessionResp.result:type_name -> s2a.proto.SessionResult + 8, // 
23: s2a.proto.SessionResp.status:type_name -> s2a.proto.SessionStatus + 5, // 24: s2a.proto.S2AService.SetUpSession:input_type -> s2a.proto.SessionReq + 9, // 25: s2a.proto.S2AService.SetUpSession:output_type -> s2a.proto.SessionResp + 25, // [25:26] is the sub-list for method output_type + 24, // [24:25] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_internal_proto_s2a_s2a_proto_init() } +func file_internal_proto_s2a_s2a_proto_init() { + if File_internal_proto_s2a_s2a_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticationMechanism); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientSessionStartReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerSessionStartReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionNextReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResumptionTicketReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResult); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AuthenticationMechanism_Token)(nil), + } + 
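The OneofWrappers registered here and just below are the concrete types behind SessionReq's req_oneof field. As an illustrative sketch of how callers select a variant through those generated wrappers (buildStartAndNext and handshakeBytes are hypothetical; the import path is the module-internal one):

import s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto"

// buildStartAndNext shows the generated oneof wrapper types choosing the
// req_oneof variant: one client_start to open the session, then a next
// message carrying handshake bytes received from the peer.
func buildStartAndNext(handshakeBytes []byte) (start, next *s2apb.SessionReq) {
	start = &s2apb.SessionReq{
		ReqOneof: &s2apb.SessionReq_ClientStart{
			ClientStart: &s2apb.ClientSessionStartReq{
				// Request ALPN "grpc"; the other fields are omitted here.
				ApplicationProtocols: []string{"grpc"},
			},
		},
	}
	next = &s2apb.SessionReq{
		ReqOneof: &s2apb.SessionReq_Next{
			Next: &s2apb.SessionNextReq{InBytes: handshakeBytes},
		},
	}
	return start, next
}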
file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*SessionReq_ClientStart)(nil), + (*SessionReq_ServerStart)(nil), + (*SessionReq_Next)(nil), + (*SessionReq_ResumptionTicket)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_s2a_s2a_proto_rawDesc, + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_proto_s2a_s2a_proto_goTypes, + DependencyIndexes: file_internal_proto_s2a_s2a_proto_depIdxs, + MessageInfos: file_internal_proto_s2a_s2a_proto_msgTypes, + }.Build() + File_internal_proto_s2a_s2a_proto = out.File + file_internal_proto_s2a_s2a_proto_rawDesc = nil + file_internal_proto_s2a_s2a_proto_goTypes = nil + file_internal_proto_s2a_s2a_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go new file mode 100644 index 00000000..0fa582fc --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go @@ -0,0 +1,173 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.12 +// source: internal/proto/s2a/s2a.proto + +package s2a_go_proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession" +) + +// S2AServiceClient is the client API for S2AService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type S2AServiceClient interface { + // S2A service accepts a stream of session setup requests and returns a stream + // of session setup responses. The client of this service is expected to send + // exactly one client_start or server_start message followed by at least one + // next message. Applications running TLS clients can send requests with + // resumption_ticket messages only after the session is successfully set up. + // + // Every time S2A client sends a request, this service sends a response. + // However, clients do not have to wait for service response before sending + // the next request. 
+ SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) +} + +type s2AServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { + return &s2AServiceClient{cc} +} + +func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &s2AServiceSetUpSessionClient{stream} + return x, nil +} + +type S2AService_SetUpSessionClient interface { + Send(*SessionReq) error + Recv() (*SessionResp, error) + grpc.ClientStream +} + +type s2AServiceSetUpSessionClient struct { + grpc.ClientStream +} + +func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { + m := new(SessionResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AServiceServer is the server API for S2AService service. +// All implementations must embed UnimplementedS2AServiceServer +// for forward compatibility +type S2AServiceServer interface { + // S2A service accepts a stream of session setup requests and returns a stream + // of session setup responses. The client of this service is expected to send + // exactly one client_start or server_start message followed by at least one + // next message. Applications running TLS clients can send requests with + // resumption_ticket messages only after the session is successfully set up. + // + // Every time S2A client sends a request, this service sends a response. + // However, clients do not have to wait for service response before sending + // the next request. + SetUpSession(S2AService_SetUpSessionServer) error + mustEmbedUnimplementedS2AServiceServer() +} + +// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. +type UnimplementedS2AServiceServer struct { +} + +func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { + return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") +} +func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} + +// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to S2AServiceServer will +// result in compilation errors. 
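Per the interface comments above, the expected calling pattern is: open the bidirectional stream, send exactly one client_start or server_start message, then exchange next messages, with one response per request. A minimal client-side sketch under those assumptions (openSession is a hypothetical helper; cc and ctx come from the caller):

import (
	"context"

	"google.golang.org/grpc"

	s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto"
)

// openSession dials the SetUpSession stream and performs the first round
// trip of the session setup exchange.
func openSession(ctx context.Context, cc grpc.ClientConnInterface, start *s2apb.SessionReq) (s2apb.S2AService_SetUpSessionClient, *s2apb.SessionResp, error) {
	stream, err := s2apb.NewS2AServiceClient(cc).SetUpSession(ctx)
	if err != nil {
		return nil, nil, err
	}
	// Exactly one client_start or server_start message opens the session.
	if err := stream.Send(start); err != nil {
		return nil, nil, err
	}
	// Every request gets a response, though senders are not required to
	// wait for it before sending the next request.
	resp, err := stream.Recv()
	if err != nil {
		return nil, nil, err
	}
	return stream, resp, nil
}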
+type UnsafeS2AServiceServer interface { + mustEmbedUnimplementedS2AServiceServer() +} + +func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { + s.RegisterService(&S2AService_ServiceDesc, srv) +} + +func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) +} + +type S2AService_SetUpSessionServer interface { + Send(*SessionResp) error + Recv() (*SessionReq, error) + grpc.ServerStream +} + +type s2AServiceSetUpSessionServer struct { + grpc.ServerStream +} + +func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { + m := new(SessionReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var S2AService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "s2a.proto.S2AService", + HandlerType: (*S2AServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SetUpSession", + Handler: _S2AService_SetUpSession_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "internal/proto/s2a/s2a.proto", +} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go new file mode 100644 index 00000000..c84bed97 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go @@ -0,0 +1,367 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/common/common.proto + +package common_go_proto + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The TLS 1.0-1.2 ciphersuites that the application can negotiate when using +// S2A. 
+type Ciphersuite int32 + +const ( + Ciphersuite_CIPHERSUITE_UNSPECIFIED Ciphersuite = 0 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 1 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 2 + Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 3 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 4 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 5 + Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 6 +) + +// Enum value maps for Ciphersuite. +var ( + Ciphersuite_name = map[int32]string{ + 0: "CIPHERSUITE_UNSPECIFIED", + 1: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + 2: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + 3: "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + 4: "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + 5: "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + 6: "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + } + Ciphersuite_value = map[string]int32{ + "CIPHERSUITE_UNSPECIFIED": 0, + "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": 1, + "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": 2, + "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": 3, + "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256": 4, + "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384": 5, + "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": 6, + } +) + +func (x Ciphersuite) Enum() *Ciphersuite { + p := new(Ciphersuite) + *p = x + return p +} + +func (x Ciphersuite) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[0].Descriptor() +} + +func (Ciphersuite) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[0] +} + +func (x Ciphersuite) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Ciphersuite.Descriptor instead. +func (Ciphersuite) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0} +} + +// The TLS versions supported by S2A's handshaker module. +type TLSVersion int32 + +const ( + TLSVersion_TLS_VERSION_UNSPECIFIED TLSVersion = 0 + TLSVersion_TLS_VERSION_1_0 TLSVersion = 1 + TLSVersion_TLS_VERSION_1_1 TLSVersion = 2 + TLSVersion_TLS_VERSION_1_2 TLSVersion = 3 + TLSVersion_TLS_VERSION_1_3 TLSVersion = 4 +) + +// Enum value maps for TLSVersion. 
+var ( + TLSVersion_name = map[int32]string{ + 0: "TLS_VERSION_UNSPECIFIED", + 1: "TLS_VERSION_1_0", + 2: "TLS_VERSION_1_1", + 3: "TLS_VERSION_1_2", + 4: "TLS_VERSION_1_3", + } + TLSVersion_value = map[string]int32{ + "TLS_VERSION_UNSPECIFIED": 0, + "TLS_VERSION_1_0": 1, + "TLS_VERSION_1_1": 2, + "TLS_VERSION_1_2": 3, + "TLS_VERSION_1_3": 4, + } +) + +func (x TLSVersion) Enum() *TLSVersion { + p := new(TLSVersion) + *p = x + return p +} + +func (x TLSVersion) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TLSVersion) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[1].Descriptor() +} + +func (TLSVersion) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[1] +} + +func (x TLSVersion) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TLSVersion.Descriptor instead. +func (TLSVersion) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{1} +} + +// The side in the TLS connection. +type ConnectionSide int32 + +const ( + ConnectionSide_CONNECTION_SIDE_UNSPECIFIED ConnectionSide = 0 + ConnectionSide_CONNECTION_SIDE_CLIENT ConnectionSide = 1 + ConnectionSide_CONNECTION_SIDE_SERVER ConnectionSide = 2 +) + +// Enum value maps for ConnectionSide. +var ( + ConnectionSide_name = map[int32]string{ + 0: "CONNECTION_SIDE_UNSPECIFIED", + 1: "CONNECTION_SIDE_CLIENT", + 2: "CONNECTION_SIDE_SERVER", + } + ConnectionSide_value = map[string]int32{ + "CONNECTION_SIDE_UNSPECIFIED": 0, + "CONNECTION_SIDE_CLIENT": 1, + "CONNECTION_SIDE_SERVER": 2, + } +) + +func (x ConnectionSide) Enum() *ConnectionSide { + p := new(ConnectionSide) + *p = x + return p +} + +func (x ConnectionSide) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConnectionSide) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[2].Descriptor() +} + +func (ConnectionSide) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[2] +} + +func (x ConnectionSide) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConnectionSide.Descriptor instead. +func (ConnectionSide) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{2} +} + +// The ALPN protocols that the application can negotiate during a TLS handshake. +type AlpnProtocol int32 + +const ( + AlpnProtocol_ALPN_PROTOCOL_UNSPECIFIED AlpnProtocol = 0 + AlpnProtocol_ALPN_PROTOCOL_GRPC AlpnProtocol = 1 + AlpnProtocol_ALPN_PROTOCOL_HTTP2 AlpnProtocol = 2 + AlpnProtocol_ALPN_PROTOCOL_HTTP1_1 AlpnProtocol = 3 +) + +// Enum value maps for AlpnProtocol. 
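These v2 enums mirror standard-library notions; for instance, a caller translating TLSVersion into crypto/tls constants might write something like the following hypothetical helper (not part of the generated package):

import (
	"crypto/tls"
	"fmt"

	commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto"
)

// toStdTLSVersion maps the s2a.proto.v2 TLSVersion enum onto Go's
// crypto/tls version constants.
func toStdTLSVersion(v commonpb.TLSVersion) (uint16, error) {
	switch v {
	case commonpb.TLSVersion_TLS_VERSION_1_0:
		return tls.VersionTLS10, nil
	case commonpb.TLSVersion_TLS_VERSION_1_1:
		return tls.VersionTLS11, nil
	case commonpb.TLSVersion_TLS_VERSION_1_2:
		return tls.VersionTLS12, nil
	case commonpb.TLSVersion_TLS_VERSION_1_3:
		return tls.VersionTLS13, nil
	default:
		return 0, fmt.Errorf("unsupported TLSVersion: %v", v)
	}
}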
+var ( + AlpnProtocol_name = map[int32]string{ + 0: "ALPN_PROTOCOL_UNSPECIFIED", + 1: "ALPN_PROTOCOL_GRPC", + 2: "ALPN_PROTOCOL_HTTP2", + 3: "ALPN_PROTOCOL_HTTP1_1", + } + AlpnProtocol_value = map[string]int32{ + "ALPN_PROTOCOL_UNSPECIFIED": 0, + "ALPN_PROTOCOL_GRPC": 1, + "ALPN_PROTOCOL_HTTP2": 2, + "ALPN_PROTOCOL_HTTP1_1": 3, + } +) + +func (x AlpnProtocol) Enum() *AlpnProtocol { + p := new(AlpnProtocol) + *p = x + return p +} + +func (x AlpnProtocol) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AlpnProtocol) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_common_common_proto_enumTypes[3].Descriptor() +} + +func (AlpnProtocol) Type() protoreflect.EnumType { + return &file_internal_proto_v2_common_common_proto_enumTypes[3] +} + +func (x AlpnProtocol) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AlpnProtocol.Descriptor instead. +func (AlpnProtocol) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3} +} + +var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_common_common_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, + 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, + 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, + 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, + 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, + 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, + 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, + 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35, + 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, + 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, + 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, + 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45, + 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, + 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, + 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, + 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, + 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a, + 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 
0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, + 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43, + 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, + 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, + 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, + 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, + 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54, + 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03, + 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, + 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45, + 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, + 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49, + 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, + 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, + 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, + 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f, + 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02, + 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, + 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, + 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_v2_common_common_proto_rawDescOnce sync.Once + file_internal_proto_v2_common_common_proto_rawDescData = file_internal_proto_v2_common_common_proto_rawDesc +) + +func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte { + file_internal_proto_v2_common_common_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_common_common_proto_rawDescData) + }) + return file_internal_proto_v2_common_common_proto_rawDescData +} + +var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var 
file_internal_proto_v2_common_common_proto_goTypes = []interface{}{ + (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite + (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion + (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide + (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol +} +var file_internal_proto_v2_common_common_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_common_common_proto_init() } +func file_internal_proto_v2_common_common_proto_init() { + if File_internal_proto_v2_common_common_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc, + NumEnums: 4, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_v2_common_common_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs, + EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes, + }.Build() + File_internal_proto_v2_common_common_proto = out.File + file_internal_proto_v2_common_common_proto_rawDesc = nil + file_internal_proto_v2_common_common_proto_goTypes = nil + file_internal_proto_v2_common_common_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go new file mode 100644 index 00000000..b7fd871c --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go @@ -0,0 +1,248 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/s2a_context/s2a_context.proto + +package s2a_context_go_proto + +import ( + common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type S2AContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The SPIFFE ID from the peer leaf certificate, if present. 
+ // + // This field is only populated if the leaf certificate is a valid SPIFFE + // SVID; in particular, there is a unique URI SAN and this URI SAN is a valid + // SPIFFE ID. + LeafCertSpiffeId string `protobuf:"bytes,1,opt,name=leaf_cert_spiffe_id,json=leafCertSpiffeId,proto3" json:"leaf_cert_spiffe_id,omitempty"` + // The URIs that are present in the SubjectAltName extension of the peer leaf + // certificate. + // + // Note that the extracted URIs are not validated and may not be properly + // formatted. + LeafCertUris []string `protobuf:"bytes,2,rep,name=leaf_cert_uris,json=leafCertUris,proto3" json:"leaf_cert_uris,omitempty"` + // The DNSNames that are present in the SubjectAltName extension of the peer + // leaf certificate. + LeafCertDnsnames []string `protobuf:"bytes,3,rep,name=leaf_cert_dnsnames,json=leafCertDnsnames,proto3" json:"leaf_cert_dnsnames,omitempty"` + // The (ordered) list of fingerprints in the certificate chain used to verify + // the given leaf certificate. The order MUST be from leaf certificate + // fingerprint to root certificate fingerprint. + // + // A fingerprint is the base-64 encoding of the SHA256 hash of the + // DER-encoding of a certificate. The list MAY be populated even if the peer + // certificate chain was NOT validated successfully. + PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"` + // The local identity used during session setup. + LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The SHA256 hash of the DER-encoding of the local leaf certificate used in + // the handshake. + LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"` +} + +func (x *S2AContext) Reset() { + *x = S2AContext{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S2AContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S2AContext) ProtoMessage() {} + +func (x *S2AContext) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead. 
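Given those field semantics, a consumer of S2AContext might gate peer authorization on the SPIFFE ID extracted from the leaf certificate. An illustrative sketch (authorizePeer and allowed are hypothetical; only the getter shown in this file is used):

import (
	"fmt"

	ctxpb "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto"
)

// authorizePeer accepts the peer only if its leaf certificate carried a
// valid SPIFFE SVID whose ID is in the allow list.
func authorizePeer(sc *ctxpb.S2AContext, allowed map[string]bool) error {
	// An empty string means the peer leaf certificate was not a valid
	// SPIFFE SVID, per the field comment above.
	id := sc.GetLeafCertSpiffeId()
	if id == "" || !allowed[id] {
		return fmt.Errorf("peer SPIFFE ID %q not authorized", id)
	}
	return nil
}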
+func (*S2AContext) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0} +} + +func (x *S2AContext) GetLeafCertSpiffeId() string { + if x != nil { + return x.LeafCertSpiffeId + } + return "" +} + +func (x *S2AContext) GetLeafCertUris() []string { + if x != nil { + return x.LeafCertUris + } + return nil +} + +func (x *S2AContext) GetLeafCertDnsnames() []string { + if x != nil { + return x.LeafCertDnsnames + } + return nil +} + +func (x *S2AContext) GetPeerCertificateChainFingerprints() []string { + if x != nil { + return x.PeerCertificateChainFingerprints + } + return nil +} + +func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *S2AContext) GetLocalLeafCertFingerprint() []byte { + if x != nil { + return x.LocalLeafCertFingerprint + } + return nil +} + +var File_internal_proto_v2_s2a_context_s2a_context_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, + 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, + 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75, + 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43, + 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, + 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x5f, 
0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66, + 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42, + 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce sync.Once + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc +) + +func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte { + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData) + }) + return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData +} + +var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{ + (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext + (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity +} +var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{ + 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_s2a_context_s2a_context_proto_init() } +func file_internal_proto_v2_s2a_context_s2a_context_proto_init() { + if File_internal_proto_v2_s2a_context_s2a_context_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S2AContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs, + MessageInfos: file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes, + }.Build() + File_internal_proto_v2_s2a_context_s2a_context_proto = out.File + file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = nil + file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = nil + file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go 
b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go new file mode 100644 index 00000000..e843450c --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go @@ -0,0 +1,2494 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc v3.21.12 +// source: internal/proto/v2/s2a/s2a.proto + +package s2a_go_proto + +import ( + common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto" + common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto" + s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SignatureAlgorithm int32 + +const ( + SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED SignatureAlgorithm = 0 + // RSA Public-Key Cryptography Standards #1. + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256 SignatureAlgorithm = 1 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384 SignatureAlgorithm = 2 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512 SignatureAlgorithm = 3 + // ECDSA. + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256 SignatureAlgorithm = 4 + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384 SignatureAlgorithm = 5 + SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512 SignatureAlgorithm = 6 + // RSA Probabilistic Signature Scheme. + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256 SignatureAlgorithm = 7 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384 SignatureAlgorithm = 8 + SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512 SignatureAlgorithm = 9 + // ED25519. + SignatureAlgorithm_S2A_SSL_SIGN_ED25519 SignatureAlgorithm = 10 +) + +// Enum value maps for SignatureAlgorithm. 
+var ( + SignatureAlgorithm_name = map[int32]string{ + 0: "S2A_SSL_SIGN_UNSPECIFIED", + 1: "S2A_SSL_SIGN_RSA_PKCS1_SHA256", + 2: "S2A_SSL_SIGN_RSA_PKCS1_SHA384", + 3: "S2A_SSL_SIGN_RSA_PKCS1_SHA512", + 4: "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256", + 5: "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384", + 6: "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512", + 7: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256", + 8: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384", + 9: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512", + 10: "S2A_SSL_SIGN_ED25519", + } + SignatureAlgorithm_value = map[string]int32{ + "S2A_SSL_SIGN_UNSPECIFIED": 0, + "S2A_SSL_SIGN_RSA_PKCS1_SHA256": 1, + "S2A_SSL_SIGN_RSA_PKCS1_SHA384": 2, + "S2A_SSL_SIGN_RSA_PKCS1_SHA512": 3, + "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256": 4, + "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384": 5, + "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512": 6, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256": 7, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384": 8, + "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512": 9, + "S2A_SSL_SIGN_ED25519": 10, + } +) + +func (x SignatureAlgorithm) Enum() *SignatureAlgorithm { + p := new(SignatureAlgorithm) + *p = x + return p +} + +func (x SignatureAlgorithm) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SignatureAlgorithm) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[0].Descriptor() +} + +func (SignatureAlgorithm) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[0] +} + +func (x SignatureAlgorithm) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SignatureAlgorithm.Descriptor instead. +func (SignatureAlgorithm) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +type GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate int32 + +const ( + GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 0 + GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 1 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 2 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 3 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 4 + GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 5 +) + +// Enum value maps for GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate. 
+var ( + GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "DONT_REQUEST_CLIENT_CERTIFICATE", + 2: "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", + 3: "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY", + 4: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY", + 5: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY", + } + GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_value = map[string]int32{ + "UNSPECIFIED": 0, + "DONT_REQUEST_CLIENT_CERTIFICATE": 1, + "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 2, + "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY": 3, + "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 4, + "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY": 5, + } +) + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Enum() *GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { + p := new(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) + *p = x + return p +} + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[1].Descriptor() +} + +func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[1] +} + +func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate.Descriptor instead. +func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1, 0} +} + +type OffloadPrivateKeyOperationReq_PrivateKeyOperation int32 + +const ( + OffloadPrivateKeyOperationReq_UNSPECIFIED OffloadPrivateKeyOperationReq_PrivateKeyOperation = 0 + // When performing a TLS 1.2 or 1.3 handshake, the (partial) transcript of + // the TLS handshake must be signed to prove possession of the private key. + // + // See https://www.rfc-editor.org/rfc/rfc8446.html#section-4.4.3. + OffloadPrivateKeyOperationReq_SIGN OffloadPrivateKeyOperationReq_PrivateKeyOperation = 1 + // When performing a TLS 1.2 handshake using an RSA algorithm, the key + // exchange algorithm involves the client generating a premaster secret, + // encrypting it using the server's public key, and sending this encrypted + // blob to the server in a ClientKeyExchange message. + // + // See https://www.rfc-editor.org/rfc/rfc4346#section-7.4.7.1. + OffloadPrivateKeyOperationReq_DECRYPT OffloadPrivateKeyOperationReq_PrivateKeyOperation = 2 +) + +// Enum value maps for OffloadPrivateKeyOperationReq_PrivateKeyOperation. 
+var ( + OffloadPrivateKeyOperationReq_PrivateKeyOperation_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SIGN", + 2: "DECRYPT", + } + OffloadPrivateKeyOperationReq_PrivateKeyOperation_value = map[string]int32{ + "UNSPECIFIED": 0, + "SIGN": 1, + "DECRYPT": 2, + } +) + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Enum() *OffloadPrivateKeyOperationReq_PrivateKeyOperation { + p := new(OffloadPrivateKeyOperationReq_PrivateKeyOperation) + *p = x + return p +} + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[2].Descriptor() +} + +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[2] +} + +func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationReq_PrivateKeyOperation.Descriptor instead. +func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5, 0} +} + +type OffloadResumptionKeyOperationReq_ResumptionKeyOperation int32 + +const ( + OffloadResumptionKeyOperationReq_UNSPECIFIED OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 0 + OffloadResumptionKeyOperationReq_ENCRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 1 + OffloadResumptionKeyOperationReq_DECRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 2 +) + +// Enum value maps for OffloadResumptionKeyOperationReq_ResumptionKeyOperation. +var ( + OffloadResumptionKeyOperationReq_ResumptionKeyOperation_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "ENCRYPT", + 2: "DECRYPT", + } + OffloadResumptionKeyOperationReq_ResumptionKeyOperation_value = map[string]int32{ + "UNSPECIFIED": 0, + "ENCRYPT": 1, + "DECRYPT": 2, + } +) + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Enum() *OffloadResumptionKeyOperationReq_ResumptionKeyOperation { + p := new(OffloadResumptionKeyOperationReq_ResumptionKeyOperation) + *p = x + return p +} + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[3].Descriptor() +} + +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[3] +} + +func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationReq_ResumptionKeyOperation.Descriptor instead. +func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7, 0} +} + +type ValidatePeerCertificateChainReq_VerificationMode int32 + +const ( + // The default verification mode supported by S2A. 
+ ValidatePeerCertificateChainReq_UNSPECIFIED ValidatePeerCertificateChainReq_VerificationMode = 0 + // The SPIFFE verification mode selects the set of trusted certificates to + // use for path building based on the SPIFFE trust domain in the peer's leaf + // certificate. + ValidatePeerCertificateChainReq_SPIFFE ValidatePeerCertificateChainReq_VerificationMode = 1 + // The connect-to-Google verification mode uses the trust bundle for + // connecting to Google, e.g. *.mtls.googleapis.com endpoints. + ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2 +) + +// Enum value maps for ValidatePeerCertificateChainReq_VerificationMode. +var ( + ValidatePeerCertificateChainReq_VerificationMode_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SPIFFE", + 2: "CONNECT_TO_GOOGLE", + } + ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{ + "UNSPECIFIED": 0, + "SPIFFE": 1, + "CONNECT_TO_GOOGLE": 2, + } +) + +func (x ValidatePeerCertificateChainReq_VerificationMode) Enum() *ValidatePeerCertificateChainReq_VerificationMode { + p := new(ValidatePeerCertificateChainReq_VerificationMode) + *p = x + return p +} + +func (x ValidatePeerCertificateChainReq_VerificationMode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ValidatePeerCertificateChainReq_VerificationMode) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[4].Descriptor() +} + +func (ValidatePeerCertificateChainReq_VerificationMode) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[4] +} + +func (x ValidatePeerCertificateChainReq_VerificationMode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_VerificationMode.Descriptor instead. +func (ValidatePeerCertificateChainReq_VerificationMode) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} +} + +type ValidatePeerCertificateChainResp_ValidationResult int32 + +const ( + ValidatePeerCertificateChainResp_UNSPECIFIED ValidatePeerCertificateChainResp_ValidationResult = 0 + ValidatePeerCertificateChainResp_SUCCESS ValidatePeerCertificateChainResp_ValidationResult = 1 + ValidatePeerCertificateChainResp_FAILURE ValidatePeerCertificateChainResp_ValidationResult = 2 +) + +// Enum value maps for ValidatePeerCertificateChainResp_ValidationResult. 
+var ( + ValidatePeerCertificateChainResp_ValidationResult_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SUCCESS", + 2: "FAILURE", + } + ValidatePeerCertificateChainResp_ValidationResult_value = map[string]int32{ + "UNSPECIFIED": 0, + "SUCCESS": 1, + "FAILURE": 2, + } +) + +func (x ValidatePeerCertificateChainResp_ValidationResult) Enum() *ValidatePeerCertificateChainResp_ValidationResult { + p := new(ValidatePeerCertificateChainResp_ValidationResult) + *p = x + return p +} + +func (x ValidatePeerCertificateChainResp_ValidationResult) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ValidatePeerCertificateChainResp_ValidationResult) Descriptor() protoreflect.EnumDescriptor { + return file_internal_proto_v2_s2a_s2a_proto_enumTypes[5].Descriptor() +} + +func (ValidatePeerCertificateChainResp_ValidationResult) Type() protoreflect.EnumType { + return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[5] +} + +func (x ValidatePeerCertificateChainResp_ValidationResult) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ValidatePeerCertificateChainResp_ValidationResult.Descriptor instead. +func (ValidatePeerCertificateChainResp_ValidationResult) EnumDescriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10, 0} +} + +type AlpnPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // If true, the application MUST perform ALPN negotiation. + EnableAlpnNegotiation bool `protobuf:"varint,1,opt,name=enable_alpn_negotiation,json=enableAlpnNegotiation,proto3" json:"enable_alpn_negotiation,omitempty"` + // The ordered list of ALPN protocols that specify how the application SHOULD + // negotiate ALPN during the TLS handshake. + // + // The application MAY ignore any ALPN protocols in this list that are not + // supported by the application. + AlpnProtocols []common_go_proto.AlpnProtocol `protobuf:"varint,2,rep,packed,name=alpn_protocols,json=alpnProtocols,proto3,enum=s2a.proto.v2.AlpnProtocol" json:"alpn_protocols,omitempty"` +} + +func (x *AlpnPolicy) Reset() { + *x = AlpnPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AlpnPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AlpnPolicy) ProtoMessage() {} + +func (x *AlpnPolicy) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AlpnPolicy.ProtoReflect.Descriptor instead. +func (*AlpnPolicy) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0} +} + +func (x *AlpnPolicy) GetEnableAlpnNegotiation() bool { + if x != nil { + return x.EnableAlpnNegotiation + } + return false +} + +func (x *AlpnPolicy) GetAlpnProtocols() []common_go_proto.AlpnProtocol { + if x != nil { + return x.AlpnProtocols + } + return nil +} + +type AuthenticationMechanism struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Applications may specify an identity associated to an authentication + // mechanism. 
Otherwise, S2A assumes that the authentication mechanism is + // associated with the default identity. If the default identity cannot be + // determined, the request is rejected. + Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // Types that are assignable to MechanismOneof: + // + // *AuthenticationMechanism_Token + MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"` +} + +func (x *AuthenticationMechanism) Reset() { + *x = AuthenticationMechanism{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthenticationMechanism) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthenticationMechanism) ProtoMessage() {} + +func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead. +func (*AuthenticationMechanism) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1} +} + +func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity { + if x != nil { + return x.Identity + } + return nil +} + +func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof { + if m != nil { + return m.MechanismOneof + } + return nil +} + +func (x *AuthenticationMechanism) GetToken() string { + if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok { + return x.Token + } + return "" +} + +type isAuthenticationMechanism_MechanismOneof interface { + isAuthenticationMechanism_MechanismOneof() +} + +type AuthenticationMechanism_Token struct { + // A token that the application uses to authenticate itself to S2A. + Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"` +} + +func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {} + +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code that is specific to the application and the implementation + // of S2A, e.g., gRPC status code. + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // The status details. + Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{2} +} + +func (x *Status) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetDetails() string { + if x != nil { + return x.Details + } + return "" +} + +type GetTlsConfigurationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The role of the application in the TLS connection. + ConnectionSide common_go_proto.ConnectionSide `protobuf:"varint,1,opt,name=connection_side,json=connectionSide,proto3,enum=s2a.proto.v2.ConnectionSide" json:"connection_side,omitempty"` + // The server name indication (SNI) extension, which MAY be populated when a + // server is offloading to S2A. The SNI is used to determine the server + // identity if the local identity in the request is empty. + Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"` +} + +func (x *GetTlsConfigurationReq) Reset() { + *x = GetTlsConfigurationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationReq) ProtoMessage() {} + +func (x *GetTlsConfigurationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationReq.ProtoReflect.Descriptor instead. +func (*GetTlsConfigurationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{3} +} + +func (x *GetTlsConfigurationReq) GetConnectionSide() common_go_proto.ConnectionSide { + if x != nil { + return x.ConnectionSide + } + return common_go_proto.ConnectionSide(0) +} + +func (x *GetTlsConfigurationReq) GetSni() string { + if x != nil { + return x.Sni + } + return "" +} + +type GetTlsConfigurationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to TlsConfiguration: + // + // *GetTlsConfigurationResp_ClientTlsConfiguration_ + // *GetTlsConfigurationResp_ServerTlsConfiguration_ + TlsConfiguration isGetTlsConfigurationResp_TlsConfiguration `protobuf_oneof:"tls_configuration"` +} + +func (x *GetTlsConfigurationResp) Reset() { + *x = GetTlsConfigurationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp) ProtoMessage() {} + +func (x *GetTlsConfigurationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationResp.ProtoReflect.Descriptor instead. 
+func (*GetTlsConfigurationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4} +} + +func (m *GetTlsConfigurationResp) GetTlsConfiguration() isGetTlsConfigurationResp_TlsConfiguration { + if m != nil { + return m.TlsConfiguration + } + return nil +} + +func (x *GetTlsConfigurationResp) GetClientTlsConfiguration() *GetTlsConfigurationResp_ClientTlsConfiguration { + if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ClientTlsConfiguration_); ok { + return x.ClientTlsConfiguration + } + return nil +} + +func (x *GetTlsConfigurationResp) GetServerTlsConfiguration() *GetTlsConfigurationResp_ServerTlsConfiguration { + if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ServerTlsConfiguration_); ok { + return x.ServerTlsConfiguration + } + return nil +} + +type isGetTlsConfigurationResp_TlsConfiguration interface { + isGetTlsConfigurationResp_TlsConfiguration() +} + +type GetTlsConfigurationResp_ClientTlsConfiguration_ struct { + ClientTlsConfiguration *GetTlsConfigurationResp_ClientTlsConfiguration `protobuf:"bytes,1,opt,name=client_tls_configuration,json=clientTlsConfiguration,proto3,oneof"` +} + +type GetTlsConfigurationResp_ServerTlsConfiguration_ struct { + ServerTlsConfiguration *GetTlsConfigurationResp_ServerTlsConfiguration `protobuf:"bytes,2,opt,name=server_tls_configuration,json=serverTlsConfiguration,proto3,oneof"` +} + +func (*GetTlsConfigurationResp_ClientTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { +} + +func (*GetTlsConfigurationResp_ServerTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() { +} + +type OffloadPrivateKeyOperationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The operation the private key is used for. + Operation OffloadPrivateKeyOperationReq_PrivateKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadPrivateKeyOperationReq_PrivateKeyOperation" json:"operation,omitempty"` + // The signature algorithm to be used for signing operations. + SignatureAlgorithm SignatureAlgorithm `protobuf:"varint,2,opt,name=signature_algorithm,json=signatureAlgorithm,proto3,enum=s2a.proto.v2.SignatureAlgorithm" json:"signature_algorithm,omitempty"` + // The input bytes to be signed or decrypted. + // + // Types that are assignable to InBytes: + // + // *OffloadPrivateKeyOperationReq_RawBytes + // *OffloadPrivateKeyOperationReq_Sha256Digest + // *OffloadPrivateKeyOperationReq_Sha384Digest + // *OffloadPrivateKeyOperationReq_Sha512Digest + InBytes isOffloadPrivateKeyOperationReq_InBytes `protobuf_oneof:"in_bytes"` +} + +func (x *OffloadPrivateKeyOperationReq) Reset() { + *x = OffloadPrivateKeyOperationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadPrivateKeyOperationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadPrivateKeyOperationReq) ProtoMessage() {} + +func (x *OffloadPrivateKeyOperationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationReq.ProtoReflect.Descriptor instead. 
+func (*OffloadPrivateKeyOperationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5} +} + +func (x *OffloadPrivateKeyOperationReq) GetOperation() OffloadPrivateKeyOperationReq_PrivateKeyOperation { + if x != nil { + return x.Operation + } + return OffloadPrivateKeyOperationReq_UNSPECIFIED +} + +func (x *OffloadPrivateKeyOperationReq) GetSignatureAlgorithm() SignatureAlgorithm { + if x != nil { + return x.SignatureAlgorithm + } + return SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED +} + +func (m *OffloadPrivateKeyOperationReq) GetInBytes() isOffloadPrivateKeyOperationReq_InBytes { + if m != nil { + return m.InBytes + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetRawBytes() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_RawBytes); ok { + return x.RawBytes + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha256Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha256Digest); ok { + return x.Sha256Digest + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha384Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha384Digest); ok { + return x.Sha384Digest + } + return nil +} + +func (x *OffloadPrivateKeyOperationReq) GetSha512Digest() []byte { + if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha512Digest); ok { + return x.Sha512Digest + } + return nil +} + +type isOffloadPrivateKeyOperationReq_InBytes interface { + isOffloadPrivateKeyOperationReq_InBytes() +} + +type OffloadPrivateKeyOperationReq_RawBytes struct { + // Raw bytes to be hashed and signed, or decrypted. + RawBytes []byte `protobuf:"bytes,4,opt,name=raw_bytes,json=rawBytes,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha256Digest struct { + // A SHA256 hash to be signed. Must be 32 bytes. + Sha256Digest []byte `protobuf:"bytes,5,opt,name=sha256_digest,json=sha256Digest,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha384Digest struct { + // A SHA384 hash to be signed. Must be 48 bytes. + Sha384Digest []byte `protobuf:"bytes,6,opt,name=sha384_digest,json=sha384Digest,proto3,oneof"` +} + +type OffloadPrivateKeyOperationReq_Sha512Digest struct { + // A SHA512 hash to be signed. Must be 64 bytes. + Sha512Digest []byte `protobuf:"bytes,7,opt,name=sha512_digest,json=sha512Digest,proto3,oneof"` +} + +func (*OffloadPrivateKeyOperationReq_RawBytes) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha256Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha384Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +func (*OffloadPrivateKeyOperationReq_Sha512Digest) isOffloadPrivateKeyOperationReq_InBytes() {} + +type OffloadPrivateKeyOperationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The signed or decrypted output bytes. 
+ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` +} + +func (x *OffloadPrivateKeyOperationResp) Reset() { + *x = OffloadPrivateKeyOperationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadPrivateKeyOperationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadPrivateKeyOperationResp) ProtoMessage() {} + +func (x *OffloadPrivateKeyOperationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadPrivateKeyOperationResp.ProtoReflect.Descriptor instead. +func (*OffloadPrivateKeyOperationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{6} +} + +func (x *OffloadPrivateKeyOperationResp) GetOutBytes() []byte { + if x != nil { + return x.OutBytes + } + return nil +} + +type OffloadResumptionKeyOperationReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The operation the resumption key is used for. + Operation OffloadResumptionKeyOperationReq_ResumptionKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadResumptionKeyOperationReq_ResumptionKeyOperation" json:"operation,omitempty"` + // The bytes to be encrypted or decrypted. + InBytes []byte `protobuf:"bytes,2,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"` +} + +func (x *OffloadResumptionKeyOperationReq) Reset() { + *x = OffloadResumptionKeyOperationReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadResumptionKeyOperationReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadResumptionKeyOperationReq) ProtoMessage() {} + +func (x *OffloadResumptionKeyOperationReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationReq.ProtoReflect.Descriptor instead. +func (*OffloadResumptionKeyOperationReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7} +} + +func (x *OffloadResumptionKeyOperationReq) GetOperation() OffloadResumptionKeyOperationReq_ResumptionKeyOperation { + if x != nil { + return x.Operation + } + return OffloadResumptionKeyOperationReq_UNSPECIFIED +} + +func (x *OffloadResumptionKeyOperationReq) GetInBytes() []byte { + if x != nil { + return x.InBytes + } + return nil +} + +type OffloadResumptionKeyOperationResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The encrypted or decrypted bytes. 
+ OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"` +} + +func (x *OffloadResumptionKeyOperationResp) Reset() { + *x = OffloadResumptionKeyOperationResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OffloadResumptionKeyOperationResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OffloadResumptionKeyOperationResp) ProtoMessage() {} + +func (x *OffloadResumptionKeyOperationResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OffloadResumptionKeyOperationResp.ProtoReflect.Descriptor instead. +func (*OffloadResumptionKeyOperationResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{8} +} + +func (x *OffloadResumptionKeyOperationResp) GetOutBytes() []byte { + if x != nil { + return x.OutBytes + } + return nil +} + +type ValidatePeerCertificateChainReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The verification mode that S2A MUST use to validate the peer certificate + // chain. + Mode ValidatePeerCertificateChainReq_VerificationMode `protobuf:"varint,1,opt,name=mode,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainReq_VerificationMode" json:"mode,omitempty"` + // Types that are assignable to PeerOneof: + // + // *ValidatePeerCertificateChainReq_ClientPeer_ + // *ValidatePeerCertificateChainReq_ServerPeer_ + PeerOneof isValidatePeerCertificateChainReq_PeerOneof `protobuf_oneof:"peer_oneof"` +} + +func (x *ValidatePeerCertificateChainReq) Reset() { + *x = ValidatePeerCertificateChainReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq.ProtoReflect.Descriptor instead. 
+func (*ValidatePeerCertificateChainReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9} +} + +func (x *ValidatePeerCertificateChainReq) GetMode() ValidatePeerCertificateChainReq_VerificationMode { + if x != nil { + return x.Mode + } + return ValidatePeerCertificateChainReq_UNSPECIFIED +} + +func (m *ValidatePeerCertificateChainReq) GetPeerOneof() isValidatePeerCertificateChainReq_PeerOneof { + if m != nil { + return m.PeerOneof + } + return nil +} + +func (x *ValidatePeerCertificateChainReq) GetClientPeer() *ValidatePeerCertificateChainReq_ClientPeer { + if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ClientPeer_); ok { + return x.ClientPeer + } + return nil +} + +func (x *ValidatePeerCertificateChainReq) GetServerPeer() *ValidatePeerCertificateChainReq_ServerPeer { + if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ServerPeer_); ok { + return x.ServerPeer + } + return nil +} + +type isValidatePeerCertificateChainReq_PeerOneof interface { + isValidatePeerCertificateChainReq_PeerOneof() +} + +type ValidatePeerCertificateChainReq_ClientPeer_ struct { + ClientPeer *ValidatePeerCertificateChainReq_ClientPeer `protobuf:"bytes,2,opt,name=client_peer,json=clientPeer,proto3,oneof"` +} + +type ValidatePeerCertificateChainReq_ServerPeer_ struct { + ServerPeer *ValidatePeerCertificateChainReq_ServerPeer `protobuf:"bytes,3,opt,name=server_peer,json=serverPeer,proto3,oneof"` +} + +func (*ValidatePeerCertificateChainReq_ClientPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} + +func (*ValidatePeerCertificateChainReq_ServerPeer_) isValidatePeerCertificateChainReq_PeerOneof() {} + +type ValidatePeerCertificateChainResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The result of validating the peer certificate chain. + ValidationResult ValidatePeerCertificateChainResp_ValidationResult `protobuf:"varint,1,opt,name=validation_result,json=validationResult,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainResp_ValidationResult" json:"validation_result,omitempty"` + // The validation details. This field is only populated when the validation + // result is NOT SUCCESS. + ValidationDetails string `protobuf:"bytes,2,opt,name=validation_details,json=validationDetails,proto3" json:"validation_details,omitempty"` + // The S2A context contains information from the peer certificate chain. + // + // The S2A context MAY be populated even if validation of the peer certificate + // chain fails. 
+ Context *s2a_context_go_proto.S2AContext `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"` +} + +func (x *ValidatePeerCertificateChainResp) Reset() { + *x = ValidatePeerCertificateChainResp{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainResp) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainResp) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainResp.ProtoReflect.Descriptor instead. +func (*ValidatePeerCertificateChainResp) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10} +} + +func (x *ValidatePeerCertificateChainResp) GetValidationResult() ValidatePeerCertificateChainResp_ValidationResult { + if x != nil { + return x.ValidationResult + } + return ValidatePeerCertificateChainResp_UNSPECIFIED +} + +func (x *ValidatePeerCertificateChainResp) GetValidationDetails() string { + if x != nil { + return x.ValidationDetails + } + return "" +} + +func (x *ValidatePeerCertificateChainResp) GetContext() *s2a_context_go_proto.S2AContext { + if x != nil { + return x.Context + } + return nil +} + +type SessionReq struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identity corresponding to the TLS configurations that MUST be used for + // the TLS handshake. + // + // If a managed identity already exists, the local identity and authentication + // mechanisms are ignored. If a managed identity doesn't exist and the local + // identity is not populated, S2A will try to deduce the managed identity to + // use from the SNI extension. If that also fails, S2A uses the default + // identity (if one exists). + LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"` + // The authentication mechanisms that the application wishes to use to + // authenticate to S2A, ordered by preference. S2A will always use the first + // authentication mechanism that matches the managed identity. 
+ AuthenticationMechanisms []*AuthenticationMechanism `protobuf:"bytes,2,rep,name=authentication_mechanisms,json=authenticationMechanisms,proto3" json:"authentication_mechanisms,omitempty"` + // Types that are assignable to ReqOneof: + // + // *SessionReq_GetTlsConfigurationReq + // *SessionReq_OffloadPrivateKeyOperationReq + // *SessionReq_OffloadResumptionKeyOperationReq + // *SessionReq_ValidatePeerCertificateChainReq + ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"` +} + +func (x *SessionReq) Reset() { + *x = SessionReq{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SessionReq) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SessionReq) ProtoMessage() {} + +func (x *SessionReq) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead. +func (*SessionReq) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11} +} + +func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity { + if x != nil { + return x.LocalIdentity + } + return nil +} + +func (x *SessionReq) GetAuthenticationMechanisms() []*AuthenticationMechanism { + if x != nil { + return x.AuthenticationMechanisms + } + return nil +} + +func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof { + if m != nil { + return m.ReqOneof + } + return nil +} + +func (x *SessionReq) GetGetTlsConfigurationReq() *GetTlsConfigurationReq { + if x, ok := x.GetReqOneof().(*SessionReq_GetTlsConfigurationReq); ok { + return x.GetTlsConfigurationReq + } + return nil +} + +func (x *SessionReq) GetOffloadPrivateKeyOperationReq() *OffloadPrivateKeyOperationReq { + if x, ok := x.GetReqOneof().(*SessionReq_OffloadPrivateKeyOperationReq); ok { + return x.OffloadPrivateKeyOperationReq + } + return nil +} + +func (x *SessionReq) GetOffloadResumptionKeyOperationReq() *OffloadResumptionKeyOperationReq { + if x, ok := x.GetReqOneof().(*SessionReq_OffloadResumptionKeyOperationReq); ok { + return x.OffloadResumptionKeyOperationReq + } + return nil +} + +func (x *SessionReq) GetValidatePeerCertificateChainReq() *ValidatePeerCertificateChainReq { + if x, ok := x.GetReqOneof().(*SessionReq_ValidatePeerCertificateChainReq); ok { + return x.ValidatePeerCertificateChainReq + } + return nil +} + +type isSessionReq_ReqOneof interface { + isSessionReq_ReqOneof() +} + +type SessionReq_GetTlsConfigurationReq struct { + // Requests the certificate chain and TLS configuration corresponding to the + // local identity, which the application MUST use to negotiate the TLS + // handshake. + GetTlsConfigurationReq *GetTlsConfigurationReq `protobuf:"bytes,3,opt,name=get_tls_configuration_req,json=getTlsConfigurationReq,proto3,oneof"` +} + +type SessionReq_OffloadPrivateKeyOperationReq struct { + // Signs or decrypts the input bytes using a private key corresponding to + // the local identity in the request. + // + // WARNING: More than one OffloadPrivateKeyOperationReq may be sent to the + // S2Av2 by a server during a TLS 1.2 handshake. 
+	OffloadPrivateKeyOperationReq *OffloadPrivateKeyOperationReq `protobuf:"bytes,4,opt,name=offload_private_key_operation_req,json=offloadPrivateKeyOperationReq,proto3,oneof"`
+}
+
+type SessionReq_OffloadResumptionKeyOperationReq struct {
+	// Encrypts or decrypts the input bytes using a resumption key corresponding
+	// to the local identity in the request.
+	OffloadResumptionKeyOperationReq *OffloadResumptionKeyOperationReq `protobuf:"bytes,5,opt,name=offload_resumption_key_operation_req,json=offloadResumptionKeyOperationReq,proto3,oneof"`
+}
+
+type SessionReq_ValidatePeerCertificateChainReq struct {
+	// Verifies the peer's certificate chain using
+	// (a) trust bundles corresponding to the local identity in the request, and
+	// (b) the verification mode in the request.
+	ValidatePeerCertificateChainReq *ValidatePeerCertificateChainReq `protobuf:"bytes,6,opt,name=validate_peer_certificate_chain_req,json=validatePeerCertificateChainReq,proto3,oneof"`
+}
+
+func (*SessionReq_GetTlsConfigurationReq) isSessionReq_ReqOneof() {}
+
+func (*SessionReq_OffloadPrivateKeyOperationReq) isSessionReq_ReqOneof() {}
+
+func (*SessionReq_OffloadResumptionKeyOperationReq) isSessionReq_ReqOneof() {}
+
+func (*SessionReq_ValidatePeerCertificateChainReq) isSessionReq_ReqOneof() {}
+
+type SessionResp struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Status of the session response.
+	//
+	// The status field is populated so that if an error occurs when making an
+	// individual request, communication with the S2A may continue. If an
+	// error is returned directly (e.g. at the gRPC layer), then it may result
+	// in the bidirectional stream being closed.
+	Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
+	// Types that are assignable to RespOneof:
+	//
+	//	*SessionResp_GetTlsConfigurationResp
+	//	*SessionResp_OffloadPrivateKeyOperationResp
+	//	*SessionResp_OffloadResumptionKeyOperationResp
+	//	*SessionResp_ValidatePeerCertificateChainResp
+	RespOneof isSessionResp_RespOneof `protobuf_oneof:"resp_oneof"`
+}
+
+func (x *SessionResp) Reset() {
+	*x = SessionResp{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SessionResp) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SessionResp) ProtoMessage() {}
+
+func (x *SessionResp) ProtoReflect() protoreflect.Message {
+	mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead.
+func (*SessionResp) Descriptor() ([]byte, []int) {
+	return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *SessionResp) GetStatus() *Status {
+	if x != nil {
+		return x.Status
+	}
+	return nil
+}
+
+func (m *SessionResp) GetRespOneof() isSessionResp_RespOneof {
+	if m != nil {
+		return m.RespOneof
+	}
+	return nil
+}
+
+func (x *SessionResp) GetGetTlsConfigurationResp() *GetTlsConfigurationResp {
+	if x, ok := x.GetRespOneof().(*SessionResp_GetTlsConfigurationResp); ok {
+		return x.GetTlsConfigurationResp
+	}
+	return nil
+}
+
+func (x *SessionResp) GetOffloadPrivateKeyOperationResp() *OffloadPrivateKeyOperationResp {
+	if x, ok := x.GetRespOneof().(*SessionResp_OffloadPrivateKeyOperationResp); ok {
+		return x.OffloadPrivateKeyOperationResp
+	}
+	return nil
+}
+
+func (x *SessionResp) GetOffloadResumptionKeyOperationResp() *OffloadResumptionKeyOperationResp {
+	if x, ok := x.GetRespOneof().(*SessionResp_OffloadResumptionKeyOperationResp); ok {
+		return x.OffloadResumptionKeyOperationResp
+	}
+	return nil
+}
+
+func (x *SessionResp) GetValidatePeerCertificateChainResp() *ValidatePeerCertificateChainResp {
+	if x, ok := x.GetRespOneof().(*SessionResp_ValidatePeerCertificateChainResp); ok {
+		return x.ValidatePeerCertificateChainResp
+	}
+	return nil
+}
+
+type isSessionResp_RespOneof interface {
+	isSessionResp_RespOneof()
+}
+
+type SessionResp_GetTlsConfigurationResp struct {
+	// Contains the certificate chain and TLS configurations corresponding to
+	// the local identity.
+	GetTlsConfigurationResp *GetTlsConfigurationResp `protobuf:"bytes,2,opt,name=get_tls_configuration_resp,json=getTlsConfigurationResp,proto3,oneof"`
+}
+
+type SessionResp_OffloadPrivateKeyOperationResp struct {
+	// Contains the signed or decrypted output bytes using the private key
+	// corresponding to the local identity.
+	OffloadPrivateKeyOperationResp *OffloadPrivateKeyOperationResp `protobuf:"bytes,3,opt,name=offload_private_key_operation_resp,json=offloadPrivateKeyOperationResp,proto3,oneof"`
+}
+
+type SessionResp_OffloadResumptionKeyOperationResp struct {
+	// Contains the encrypted or decrypted output bytes using the resumption key
+	// corresponding to the local identity.
+	OffloadResumptionKeyOperationResp *OffloadResumptionKeyOperationResp `protobuf:"bytes,4,opt,name=offload_resumption_key_operation_resp,json=offloadResumptionKeyOperationResp,proto3,oneof"`
+}
+
+type SessionResp_ValidatePeerCertificateChainResp struct {
+	// Contains the validation result, peer identity and fingerprints of peer
+	// certificates.
+	ValidatePeerCertificateChainResp *ValidatePeerCertificateChainResp `protobuf:"bytes,5,opt,name=validate_peer_certificate_chain_resp,json=validatePeerCertificateChainResp,proto3,oneof"`
+}
+
+func (*SessionResp_GetTlsConfigurationResp) isSessionResp_RespOneof() {}
+
+func (*SessionResp_OffloadPrivateKeyOperationResp) isSessionResp_RespOneof() {}
+
+func (*SessionResp_OffloadResumptionKeyOperationResp) isSessionResp_RespOneof() {}
+
+func (*SessionResp_ValidatePeerCertificateChainResp) isSessionResp_RespOneof() {}
+
+// Next ID: 8
+type GetTlsConfigurationResp_ClientTlsConfiguration struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The certificate chain that the client MUST use for the TLS handshake.
+	// It's a list of PEM-encoded certificates, ordered from leaf to root,
+	// excluding the root.
+ CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The minimum TLS version number that the client MUST use for the TLS + // handshake. If this field is not provided, the client MUST use the default + // minimum version of the client's TLS library. + MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` + // The maximum TLS version number that the client MUST use for the TLS + // handshake. If this field is not provided, the client MUST use the default + // maximum version of the client's TLS library. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` + // The ordered list of TLS 1.0-1.2 ciphersuites that the client MAY offer to + // negotiate in the TLS handshake. + Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,6,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` + // The policy that dictates how the client negotiates ALPN during the TLS + // handshake. + AlpnPolicy *AlpnPolicy `protobuf:"bytes,7,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) Reset() { + *x = GetTlsConfigurationResp_ClientTlsConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp_ClientTlsConfiguration) ProtoMessage() {} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationResp_ClientTlsConfiguration.ProtoReflect.Descriptor instead. 
+func (*GetTlsConfigurationResp_ClientTlsConfiguration) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCertificateChain() []string { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuites + } + return nil +} + +func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { + if x != nil { + return x.AlpnPolicy + } + return nil +} + +// Next ID: 12 +type GetTlsConfigurationResp_ServerTlsConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain that the server MUST use for the TLS handshake. + // It's a list of PEM-encoded certificates, ordered from leaf to root, + // excluding the root. + CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The minimum TLS version number that the server MUST use for the TLS + // handshake. If this field is not provided, the server MUST use the default + // minimum version of the server's TLS library. + MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"` + // The maximum TLS version number that the server MUST use for the TLS + // handshake. If this field is not provided, the server MUST use the default + // maximum version of the server's TLS library. + MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"` + // The ordered list of TLS 1.0-1.2 ciphersuites that the server MAY offer to + // negotiate in the TLS handshake. + Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,10,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"` + // Whether to enable TLS resumption. + TlsResumptionEnabled bool `protobuf:"varint,6,opt,name=tls_resumption_enabled,json=tlsResumptionEnabled,proto3" json:"tls_resumption_enabled,omitempty"` + // Whether the server MUST request a client certificate (i.e. to negotiate + // TLS vs. mTLS). + RequestClientCertificate GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate `protobuf:"varint,7,opt,name=request_client_certificate,json=requestClientCertificate,proto3,enum=s2a.proto.v2.GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate" json:"request_client_certificate,omitempty"` + // Returns the maximum number of extra bytes that + // |OffloadResumptionKeyOperation| can add to the number of unencrypted + // bytes to form the encrypted bytes. 
+ MaxOverheadOfTicketAead uint32 `protobuf:"varint,9,opt,name=max_overhead_of_ticket_aead,json=maxOverheadOfTicketAead,proto3" json:"max_overhead_of_ticket_aead,omitempty"` + // The policy that dictates how the server negotiates ALPN during the TLS + // handshake. + AlpnPolicy *AlpnPolicy `protobuf:"bytes,11,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"` +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) Reset() { + *x = GetTlsConfigurationResp_ServerTlsConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTlsConfigurationResp_ServerTlsConfiguration) ProtoMessage() {} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration.ProtoReflect.Descriptor instead. +func (*GetTlsConfigurationResp_ServerTlsConfiguration) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1} +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCertificateChain() []string { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MinTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion { + if x != nil { + return x.MaxTlsVersion + } + return common_go_proto.TLSVersion(0) +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite { + if x != nil { + return x.Ciphersuites + } + return nil +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetTlsResumptionEnabled() bool { + if x != nil { + return x.TlsResumptionEnabled + } + return false +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetRequestClientCertificate() GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate { + if x != nil { + return x.RequestClientCertificate + } + return GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxOverheadOfTicketAead() uint32 { + if x != nil { + return x.MaxOverheadOfTicketAead + } + return 0 +} + +func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetAlpnPolicy() *AlpnPolicy { + if x != nil { + return x.AlpnPolicy + } + return nil +} + +type ValidatePeerCertificateChainReq_ClientPeer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain to be verified. The chain MUST be a list of + // DER-encoded certificates, ordered from leaf to root, excluding the root. 
+ CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) Reset() { + *x = ValidatePeerCertificateChainReq_ClientPeer{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq_ClientPeer) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_ClientPeer.ProtoReflect.Descriptor instead. +func (*ValidatePeerCertificateChainReq_ClientPeer) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0} +} + +func (x *ValidatePeerCertificateChainReq_ClientPeer) GetCertificateChain() [][]byte { + if x != nil { + return x.CertificateChain + } + return nil +} + +type ValidatePeerCertificateChainReq_ServerPeer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The certificate chain to be verified. The chain MUST be a list of + // DER-encoded certificates, ordered from leaf to root, excluding the root. + CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"` + // The expected hostname of the server. + ServerHostname string `protobuf:"bytes,2,opt,name=server_hostname,json=serverHostname,proto3" json:"server_hostname,omitempty"` + // The UnrestrictedClientPolicy specified by the user. + SerializedUnrestrictedClientPolicy []byte `protobuf:"bytes,3,opt,name=serialized_unrestricted_client_policy,json=serializedUnrestrictedClientPolicy,proto3" json:"serialized_unrestricted_client_policy,omitempty"` +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) Reset() { + *x = ValidatePeerCertificateChainReq_ServerPeer{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidatePeerCertificateChainReq_ServerPeer) ProtoMessage() {} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) ProtoReflect() protoreflect.Message { + mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidatePeerCertificateChainReq_ServerPeer.ProtoReflect.Descriptor instead. 
+func (*ValidatePeerCertificateChainReq_ServerPeer) Descriptor() ([]byte, []int) { + return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 1} +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetCertificateChain() [][]byte { + if x != nil { + return x.CertificateChain + } + return nil +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetServerHostname() string { + if x != nil { + return x.ServerHostname + } + return "" +} + +func (x *ValidatePeerCertificateChainReq_ServerPeer) GetSerializedUnrestrictedClientPolicy() []byte { + if x != nil { + return x.SerializedUnrestrictedClientPolicy + } + return nil +} + +var File_internal_proto_v2_s2a_s2a_proto protoreflect.FileDescriptor + +var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a, + 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, + 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a, + 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, + 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, + 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 
0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45, + 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, + 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c, + 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a, + 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, + 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, + 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 
0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, + 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, + 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08, + 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, + 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, + 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, + 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, + 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, + 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 
0x54, 0x6c, 0x73, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64, + 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65, + 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39, + 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61, + 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, + 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, + 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25, + 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, + 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56, + 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45, + 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, + 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, + 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, + 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41, + 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, + 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, + 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d, + 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a, + 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 
0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, + 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, + 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, + 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, + 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54, + 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d, + 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01, + 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, + 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, + 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09, + 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a, + 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, + 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, + 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x5b, + 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, + 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52, + 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, + 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74, + 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, + 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46, + 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, + 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12, + 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f, + 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, + 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f, + 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, + 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, + 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 
0x64, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18, + 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f, + 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32, + 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, + 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f, + 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, + 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, + 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, + 0x61, 
0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, + 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17, + 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, + 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, + 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a, + 0x72, 0x65, 0x73, 0x70, 0x5f, 
0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, + 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, + 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, + 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, + 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, + 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, + 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, + 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, + 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, + 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52, + 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, + 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, + 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31, + 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, + 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, + 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, + 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, + 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12, + 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, + 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, + 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, + 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32, + 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a, + 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e, + 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, + 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + 
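+// The byte slice above is the wire-format FileDescriptorProto for
+// internal/proto/v2/s2a/s2a.proto, embedded by protoc-gen-go. The helpers
+// below gzip it lazily: the sync.Once ensures the descriptor is compressed
+// at most once, and each generated Descriptor() method returns this shared
+// gzipped copy together with its message's index path (e.g. []int{4, 0})
+// into the file.
+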
+var ( + file_internal_proto_v2_s2a_s2a_proto_rawDescOnce sync.Once + file_internal_proto_v2_s2a_s2a_proto_rawDescData = file_internal_proto_v2_s2a_s2a_proto_rawDesc +) + +func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte { + file_internal_proto_v2_s2a_s2a_proto_rawDescOnce.Do(func() { + file_internal_proto_v2_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_s2a_proto_rawDescData) + }) + return file_internal_proto_v2_s2a_s2a_proto_rawDescData +} + +var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6) +var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{ + (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm + (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate + (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation + (OffloadResumptionKeyOperationReq_ResumptionKeyOperation)(0), // 3: s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation + (ValidatePeerCertificateChainReq_VerificationMode)(0), // 4: s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode + (ValidatePeerCertificateChainResp_ValidationResult)(0), // 5: s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult + (*AlpnPolicy)(nil), // 6: s2a.proto.v2.AlpnPolicy + (*AuthenticationMechanism)(nil), // 7: s2a.proto.v2.AuthenticationMechanism + (*Status)(nil), // 8: s2a.proto.v2.Status + (*GetTlsConfigurationReq)(nil), // 9: s2a.proto.v2.GetTlsConfigurationReq + (*GetTlsConfigurationResp)(nil), // 10: s2a.proto.v2.GetTlsConfigurationResp + (*OffloadPrivateKeyOperationReq)(nil), // 11: s2a.proto.v2.OffloadPrivateKeyOperationReq + (*OffloadPrivateKeyOperationResp)(nil), // 12: s2a.proto.v2.OffloadPrivateKeyOperationResp + (*OffloadResumptionKeyOperationReq)(nil), // 13: s2a.proto.v2.OffloadResumptionKeyOperationReq + (*OffloadResumptionKeyOperationResp)(nil), // 14: s2a.proto.v2.OffloadResumptionKeyOperationResp + (*ValidatePeerCertificateChainReq)(nil), // 15: s2a.proto.v2.ValidatePeerCertificateChainReq + (*ValidatePeerCertificateChainResp)(nil), // 16: s2a.proto.v2.ValidatePeerCertificateChainResp + (*SessionReq)(nil), // 17: s2a.proto.v2.SessionReq + (*SessionResp)(nil), // 18: s2a.proto.v2.SessionResp + (*GetTlsConfigurationResp_ClientTlsConfiguration)(nil), // 19: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration + (*GetTlsConfigurationResp_ServerTlsConfiguration)(nil), // 20: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration + (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer + (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer + (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol + (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity + (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide + (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext + (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion + (common_go_proto.Ciphersuite)(0), // 28: s2a.proto.v2.Ciphersuite +} +var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{ + 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol + 
24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity + 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide + 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration + 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration + 2, // 5: s2a.proto.v2.OffloadPrivateKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation + 0, // 6: s2a.proto.v2.OffloadPrivateKeyOperationReq.signature_algorithm:type_name -> s2a.proto.v2.SignatureAlgorithm + 3, // 7: s2a.proto.v2.OffloadResumptionKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation + 4, // 8: s2a.proto.v2.ValidatePeerCertificateChainReq.mode:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode + 21, // 9: s2a.proto.v2.ValidatePeerCertificateChainReq.client_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer + 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer + 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult + 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext + 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity + 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism + 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq + 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq + 13, // 17: s2a.proto.v2.SessionReq.offload_resumption_key_operation_req:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq + 15, // 18: s2a.proto.v2.SessionReq.validate_peer_certificate_chain_req:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq + 8, // 19: s2a.proto.v2.SessionResp.status:type_name -> s2a.proto.v2.Status + 10, // 20: s2a.proto.v2.SessionResp.get_tls_configuration_resp:type_name -> s2a.proto.v2.GetTlsConfigurationResp + 12, // 21: s2a.proto.v2.SessionResp.offload_private_key_operation_resp:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationResp + 14, // 22: s2a.proto.v2.SessionResp.offload_resumption_key_operation_resp:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationResp + 16, // 23: s2a.proto.v2.SessionResp.validate_peer_certificate_chain_resp:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp + 27, // 24: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion + 27, // 25: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion + 28, // 26: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite + 6, // 27: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy + 27, // 28: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion + 27, // 29: 
s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion + 28, // 30: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite + 1, // 31: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.request_client_certificate:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate + 6, // 32: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy + 17, // 33: s2a.proto.v2.S2AService.SetUpSession:input_type -> s2a.proto.v2.SessionReq + 18, // 34: s2a.proto.v2.S2AService.SetUpSession:output_type -> s2a.proto.v2.SessionResp + 34, // [34:35] is the sub-list for method output_type + 33, // [33:34] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name +} + +func init() { file_internal_proto_v2_s2a_s2a_proto_init() } +func file_internal_proto_v2_s2a_s2a_proto_init() { + if File_internal_proto_v2_s2a_s2a_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AlpnPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthenticationMechanism); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadPrivateKeyOperationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadPrivateKeyOperationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OffloadResumptionKeyOperationReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*OffloadResumptionKeyOperationResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SessionResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*AuthenticationMechanism_Token)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil), + (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*OffloadPrivateKeyOperationReq_RawBytes)(nil), + (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil), + (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil), + (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{ + (*ValidatePeerCertificateChainReq_ClientPeer_)(nil), + (*ValidatePeerCertificateChainReq_ServerPeer_)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{ + (*SessionReq_GetTlsConfigurationReq)(nil), + (*SessionReq_OffloadPrivateKeyOperationReq)(nil), + 
(*SessionReq_OffloadResumptionKeyOperationReq)(nil), + (*SessionReq_ValidatePeerCertificateChainReq)(nil), + } + file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{ + (*SessionResp_GetTlsConfigurationResp)(nil), + (*SessionResp_OffloadPrivateKeyOperationResp)(nil), + (*SessionResp_OffloadResumptionKeyOperationResp)(nil), + (*SessionResp_ValidatePeerCertificateChainResp)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_proto_v2_s2a_s2a_proto_rawDesc, + NumEnums: 6, + NumMessages: 17, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_proto_v2_s2a_s2a_proto_goTypes, + DependencyIndexes: file_internal_proto_v2_s2a_s2a_proto_depIdxs, + EnumInfos: file_internal_proto_v2_s2a_s2a_proto_enumTypes, + MessageInfos: file_internal_proto_v2_s2a_s2a_proto_msgTypes, + }.Build() + File_internal_proto_v2_s2a_s2a_proto = out.File + file_internal_proto_v2_s2a_s2a_proto_rawDesc = nil + file_internal_proto_v2_s2a_s2a_proto_goTypes = nil + file_internal_proto_v2_s2a_s2a_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go new file mode 100644 index 00000000..2566df6c --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go @@ -0,0 +1,159 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.21.12 +// source: internal/proto/v2/s2a/s2a.proto + +package s2a_go_proto + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession" +) + +// S2AServiceClient is the client API for S2AService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type S2AServiceClient interface { + // SetUpSession is a bidirectional stream used by applications to offload + // operations from the TLS handshake. 
+ SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) +} + +type s2AServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient { + return &s2AServiceClient{cc} +} + +func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) { + stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &s2AServiceSetUpSessionClient{stream} + return x, nil +} + +type S2AService_SetUpSessionClient interface { + Send(*SessionReq) error + Recv() (*SessionResp, error) + grpc.ClientStream +} + +type s2AServiceSetUpSessionClient struct { + grpc.ClientStream +} + +func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error { + return x.ClientStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) { + m := new(SessionResp) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AServiceServer is the server API for S2AService service. +// All implementations must embed UnimplementedS2AServiceServer +// for forward compatibility +type S2AServiceServer interface { + // SetUpSession is a bidirectional stream used by applications to offload + // operations from the TLS handshake. + SetUpSession(S2AService_SetUpSessionServer) error + mustEmbedUnimplementedS2AServiceServer() +} + +// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations. +type UnimplementedS2AServiceServer struct { +} + +func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error { + return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented") +} +func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {} + +// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to S2AServiceServer will +// result in compilation errors. +type UnsafeS2AServiceServer interface { + mustEmbedUnimplementedS2AServiceServer() +} + +func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) { + s.RegisterService(&S2AService_ServiceDesc, srv) +} + +func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream}) +} + +type S2AService_SetUpSessionServer interface { + Send(*SessionResp) error + Recv() (*SessionReq, error) + grpc.ServerStream +} + +type s2AServiceSetUpSessionServer struct { + grpc.ServerStream +} + +func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error { + return x.ServerStream.SendMsg(m) +} + +func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) { + m := new(SessionReq) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var S2AService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "s2a.proto.v2.S2AService", + HandlerType: (*S2AServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SetUpSession", + Handler: _S2AService_SetUpSession_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "internal/proto/v2/s2a/s2a.proto", +} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go new file mode 100644 index 00000000..486f4ec4 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go @@ -0,0 +1,34 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package aeadcrypter provides the interface for AEAD cipher implementations +// used by S2A's record protocol. +package aeadcrypter + +// S2AAEADCrypter is the interface for an AEAD cipher used by the S2A record +// protocol. +type S2AAEADCrypter interface { + // Encrypt encrypts the plaintext and computes the tag of dst and plaintext. + // dst and plaintext may fully overlap or not at all. + Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) + // Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may + // fully overlap or not at all. + Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) + // TagSize returns the tag size in bytes. + TagSize() int +} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go new file mode 100644 index 00000000..85c4e595 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package aeadcrypter + +import ( + "crypto/aes" + "crypto/cipher" + "fmt" +) + +// Supported key sizes in bytes. +const ( + AES128GCMKeySize = 16 + AES256GCMKeySize = 32 +) + +// aesgcm is the struct that holds an AES-GCM cipher for the S2A AEAD crypter. +type aesgcm struct { + aead cipher.AEAD +} + +// NewAESGCM creates an AES-GCM crypter instance. Note that the key must be +// either 128 bits or 256 bits. 
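+//
+// Illustrative usage sketch (editor's example, not part of the upstream
+// file; nonce and aad are placeholders, where nonce must be NonceSize bytes):
+//
+//	key := make([]byte, AES128GCMKeySize) // 16 bytes selects AES-128-GCM
+//	crypter, err := NewAESGCM(key)
+//	if err != nil {
+//		// handle invalid key size
+//	}
+//	ct, err := crypter.Encrypt(nil, []byte("hello"), nonce, aad)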
+func NewAESGCM(key []byte) (S2AAEADCrypter, error) {
+	if len(key) != AES128GCMKeySize && len(key) != AES256GCMKeySize {
+		return nil, fmt.Errorf("AES-GCM key size must be %d or %d bytes, given: %d", AES128GCMKeySize, AES256GCMKeySize, len(key))
+	}
+	c, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+	a, err := cipher.NewGCM(c)
+	if err != nil {
+		return nil, err
+	}
+	return &aesgcm{aead: a}, nil
+}
+
+// Encrypt is the encryption function. dst can contain bytes at the beginning of
+// the ciphertext that will not be encrypted but will be authenticated. If dst
+// has enough capacity to hold these bytes, the ciphertext and the tag, no
+// allocation and copy operations will be performed. dst and plaintext may
+// fully overlap or not at all.
+func (s *aesgcm) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) {
+	return encrypt(s.aead, dst, plaintext, nonce, aad)
+}
+
+func (s *aesgcm) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) {
+	return decrypt(s.aead, dst, ciphertext, nonce, aad)
+}
+
+func (s *aesgcm) TagSize() int {
+	return TagSize
+}
diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go
new file mode 100644
index 00000000..214df4ca
--- /dev/null
+++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package aeadcrypter
+
+import (
+	"crypto/cipher"
+	"fmt"
+
+	"golang.org/x/crypto/chacha20poly1305"
+)
+
+// Supported key size in bytes.
+const (
+	Chacha20Poly1305KeySize = 32
+)
+
+// chachapoly is the struct that holds a CHACHA-POLY cipher for the S2A AEAD
+// crypter.
+type chachapoly struct {
+	aead cipher.AEAD
+}
+
+// NewChachaPoly creates a Chacha-Poly crypter instance. Note that the key must
+// be Chacha20Poly1305KeySize bytes in length.
+func NewChachaPoly(key []byte) (S2AAEADCrypter, error) {
+	if len(key) != Chacha20Poly1305KeySize {
+		return nil, fmt.Errorf("ChaCha20-Poly1305 key size must be %d bytes, given: %d", Chacha20Poly1305KeySize, len(key))
+	}
+	c, err := chacha20poly1305.New(key)
+	if err != nil {
+		return nil, err
+	}
+	return &chachapoly{aead: c}, nil
+}
+
+// Encrypt is the encryption function. dst can contain bytes at the beginning of
+// the ciphertext that will not be encrypted but will be authenticated. If dst
+// has enough capacity to hold these bytes, the ciphertext and the tag, no
+// allocation and copy operations will be performed. dst and plaintext may
+// fully overlap or not at all.
+func (s *chachapoly) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) { + return encrypt(s.aead, dst, plaintext, nonce, aad) +} + +func (s *chachapoly) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) { + return decrypt(s.aead, dst, ciphertext, nonce, aad) +} + +func (s *chachapoly) TagSize() int { + return TagSize +} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go new file mode 100644 index 00000000..b3c36ad9 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go @@ -0,0 +1,92 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package aeadcrypter + +import ( + "crypto/cipher" + "fmt" +) + +const ( + // TagSize is the tag size in bytes for AES-128-GCM-SHA256, + // AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. + TagSize = 16 + // NonceSize is the size of the nonce in number of bytes for + // AES-128-GCM-SHA256, AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256. + NonceSize = 12 + // SHA256DigestSize is the digest size of sha256 in bytes. + SHA256DigestSize = 32 + // SHA384DigestSize is the digest size of sha384 in bytes. + SHA384DigestSize = 48 +) + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return head, tail +} + +// encrypt is the encryption function for an AEAD crypter. aead determines +// the type of AEAD crypter. dst can contain bytes at the beginning of the +// ciphertext that will not be encrypted but will be authenticated. If dst has +// enough capacity to hold these bytes, the ciphertext and the tag, no +// allocation and copy operations will be performed. dst and plaintext may +// fully overlap or not at all. +func encrypt(aead cipher.AEAD, dst, plaintext, nonce, aad []byte) ([]byte, error) { + if len(nonce) != NonceSize { + return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) + } + // If we need to allocate an output buffer, we want to include space for + // the tag to avoid forcing the caller to reallocate as well. + dlen := len(dst) + dst, out := sliceForAppend(dst, len(plaintext)+TagSize) + data := out[:len(plaintext)] + copy(data, plaintext) // data may fully overlap plaintext + + // Seal appends the ciphertext and the tag to its first argument and + // returns the updated slice. However, sliceForAppend above ensures that + // dst has enough capacity to avoid a reallocation and copy due to the + // append. 
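+	// For example, with an empty dst and a 10-byte plaintext, sliceForAppend
+	// returns a 26-byte buffer (10 bytes + TagSize) whose first 10 bytes
+	// alias data, and Seal below overwrites those 26 bytes in place.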
+ dst = aead.Seal(dst[:dlen], nonce, data, aad) + return dst, nil +} + +// decrypt is the decryption function for an AEAD crypter, where aead determines +// the type of AEAD crypter, and dst the destination bytes for the decrypted +// ciphertext. The dst buffer may fully overlap with plaintext or not at all. +func decrypt(aead cipher.AEAD, dst, ciphertext, nonce, aad []byte) ([]byte, error) { + if len(nonce) != NonceSize { + return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce)) + } + // If dst is equal to ciphertext[:0], ciphertext storage is reused. + plaintext, err := aead.Open(dst, nonce, ciphertext, aad) + if err != nil { + return nil, fmt.Errorf("message auth failed: %v", err) + } + return plaintext, nil +} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go new file mode 100644 index 00000000..ddeaa6d7 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go @@ -0,0 +1,98 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package halfconn + +import ( + "crypto/sha256" + "crypto/sha512" + "fmt" + "hash" + + s2apb "github.com/google/s2a-go/internal/proto/common_go_proto" + "github.com/google/s2a-go/internal/record/internal/aeadcrypter" +) + +// ciphersuite is the interface for retrieving ciphersuite-specific information +// and utilities. +type ciphersuite interface { + // keySize returns the key size in bytes. This refers to the key used by + // the AEAD crypter. This is derived by calling HKDF expand on the traffic + // secret. + keySize() int + // nonceSize returns the nonce size in bytes. + nonceSize() int + // trafficSecretSize returns the traffic secret size in bytes. This refers + // to the secret used to derive the traffic key and nonce, as specified in + // https://tools.ietf.org/html/rfc8446#section-7. + trafficSecretSize() int + // hashFunction returns the hash function for the ciphersuite. + hashFunction() func() hash.Hash + // aeadCrypter takes a key and creates an AEAD crypter for the ciphersuite + // using that key. + aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) +} + +func newCiphersuite(ciphersuite s2apb.Ciphersuite) (ciphersuite, error) { + switch ciphersuite { + case s2apb.Ciphersuite_AES_128_GCM_SHA256: + return &aesgcm128sha256{}, nil + case s2apb.Ciphersuite_AES_256_GCM_SHA384: + return &aesgcm256sha384{}, nil + case s2apb.Ciphersuite_CHACHA20_POLY1305_SHA256: + return &chachapolysha256{}, nil + default: + return nil, fmt.Errorf("unrecognized ciphersuite: %v", ciphersuite) + } +} + +// aesgcm128sha256 is the AES-128-GCM-SHA256 implementation of the ciphersuite +// interface. 
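+// Its sizes follow the constants in the aeadcrypter package: a 16-byte key,
+// a 12-byte nonce, and 32-byte (SHA-256) traffic secrets.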
+type aesgcm128sha256 struct{} + +func (aesgcm128sha256) keySize() int { return aeadcrypter.AES128GCMKeySize } +func (aesgcm128sha256) nonceSize() int { return aeadcrypter.NonceSize } +func (aesgcm128sha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } +func (aesgcm128sha256) hashFunction() func() hash.Hash { return sha256.New } +func (aesgcm128sha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewAESGCM(key) +} + +// aesgcm256sha384 is the AES-256-GCM-SHA384 implementation of the ciphersuite +// interface. +type aesgcm256sha384 struct{} + +func (aesgcm256sha384) keySize() int { return aeadcrypter.AES256GCMKeySize } +func (aesgcm256sha384) nonceSize() int { return aeadcrypter.NonceSize } +func (aesgcm256sha384) trafficSecretSize() int { return aeadcrypter.SHA384DigestSize } +func (aesgcm256sha384) hashFunction() func() hash.Hash { return sha512.New384 } +func (aesgcm256sha384) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewAESGCM(key) +} + +// chachapolysha256 is the ChaChaPoly-SHA256 implementation of the ciphersuite +// interface. +type chachapolysha256 struct{} + +func (chachapolysha256) keySize() int { return aeadcrypter.Chacha20Poly1305KeySize } +func (chachapolysha256) nonceSize() int { return aeadcrypter.NonceSize } +func (chachapolysha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize } +func (chachapolysha256) hashFunction() func() hash.Hash { return sha256.New } +func (chachapolysha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) { + return aeadcrypter.NewChachaPoly(key) +} diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go new file mode 100644 index 00000000..9499cdca --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go @@ -0,0 +1,60 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package halfconn + +import "errors" + +// counter is a 64-bit counter. +type counter struct { + val uint64 + hasOverflowed bool +} + +// newCounter creates a new counter with the initial value set to val. +func newCounter(val uint64) counter { + return counter{val: val} +} + +// value returns the current value of the counter. +func (c *counter) value() (uint64, error) { + if c.hasOverflowed { + return 0, errors.New("counter has overflowed") + } + return c.val, nil +} + +// increment increments the counter and checks for overflow. +func (c *counter) increment() { + // If the counter is already invalid due to overflow, there is no need to + // increase it. We check for the hasOverflowed flag in the call to value(). + if c.hasOverflowed { + return + } + c.val++ + if c.val == 0 { + c.hasOverflowed = true + } +} + +// reset sets the counter value to zero and sets the hasOverflowed flag to +// false. 
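+// In this package, reset is invoked after a key update, when the record
+// sequence number restarts from zero (see S2AHalfConnection.UpdateKey).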
+func (c *counter) reset() {
+	c.val = 0
+	c.hasOverflowed = false
+}
diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go
new file mode 100644
index 00000000..e05f2c36
--- /dev/null
+++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go
@@ -0,0 +1,59 @@
+/*
+ *
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package halfconn
+
+import (
+	"fmt"
+	"hash"
+
+	"golang.org/x/crypto/hkdf"
+)
+
+// hkdfExpander is the interface for the HKDF expansion function; see
+// https://tools.ietf.org/html/rfc5869 for details. Its use in TLS 1.3 is
+// specified in https://tools.ietf.org/html/rfc8446#section-7.2
+type hkdfExpander interface {
+	// expand takes a secret, a label, and the output length in bytes, and
+	// returns the resulting expanded key.
+	expand(secret, label []byte, length int) ([]byte, error)
+}
+
+// defaultHKDFExpander is the default HKDF expander, which uses the
+// golang.org/x/crypto/hkdf package for HKDF expansion.
+type defaultHKDFExpander struct {
+	h func() hash.Hash
+}
+
+// newDefaultHKDFExpander creates an instance of the default HKDF expander
+// using the given hash function.
+func newDefaultHKDFExpander(h func() hash.Hash) hkdfExpander {
+	return &defaultHKDFExpander{h: h}
+}
+
+func (d *defaultHKDFExpander) expand(secret, label []byte, length int) ([]byte, error) {
+	outBuf := make([]byte, length)
+	n, err := hkdf.Expand(d.h, secret, label).Read(outBuf)
+	if err != nil {
+		return nil, fmt.Errorf("hkdf.Expand.Read failed with error: %v", err)
+	}
+	if n < length {
+		return nil, fmt.Errorf("hkdf.Expand.Read returned unexpected length, got %d, want %d", n, length)
+	}
+	return outBuf, nil
+}
diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go
new file mode 100644
index 00000000..dff99ff5
--- /dev/null
+++ b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go
@@ -0,0 +1,193 @@
+/*
+ *
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package halfconn manages the inbound or outbound traffic of a TLS 1.3
+// connection.
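+//
+// A half connection derives its AEAD key and base nonce from a traffic
+// secret via HKDF-Expand-Label (RFC 8446, section 7), and XORs the record
+// sequence number into the final 8 bytes of the nonce for each record.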
+package halfconn
+
+import (
+	"fmt"
+	"sync"
+
+	s2apb "github.com/google/s2a-go/internal/proto/common_go_proto"
+	"github.com/google/s2a-go/internal/record/internal/aeadcrypter"
+	"golang.org/x/crypto/cryptobyte"
+)
+
+// The constants below were taken from Section 7.2 and 7.3 in
+// https://tools.ietf.org/html/rfc8446#section-7. They are used as the label
+// in HKDF-Expand-Label.
+const (
+	tls13Key    = "tls13 key"
+	tls13Nonce  = "tls13 iv"
+	tls13Update = "tls13 traffic upd"
+)
+
+// S2AHalfConnection stores the state of the TLS 1.3 connection in the
+// inbound or outbound direction.
+type S2AHalfConnection struct {
+	cs       ciphersuite
+	expander hkdfExpander
+	// mutex guards sequence, aeadCrypter, trafficSecret, and nonce.
+	mutex         sync.Mutex
+	aeadCrypter   aeadcrypter.S2AAEADCrypter
+	sequence      counter
+	trafficSecret []byte
+	nonce         []byte
+}
+
+// New creates a new instance of S2AHalfConnection given a ciphersuite, a
+// traffic secret, and an initial sequence number.
+func New(ciphersuite s2apb.Ciphersuite, trafficSecret []byte, sequence uint64) (*S2AHalfConnection, error) {
+	cs, err := newCiphersuite(ciphersuite)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create new ciphersuite: %v", err)
+	}
+	if cs.trafficSecretSize() != len(trafficSecret) {
+		return nil, fmt.Errorf("supplied traffic secret must be %v bytes, given: %v bytes", cs.trafficSecretSize(), len(trafficSecret))
+	}
+
+	hc := &S2AHalfConnection{cs: cs, expander: newDefaultHKDFExpander(cs.hashFunction()), sequence: newCounter(sequence), trafficSecret: trafficSecret}
+	if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil {
+		return nil, fmt.Errorf("failed to create half connection using traffic secret: %v", err)
+	}
+
+	return hc, nil
+}
+
+// Encrypt encrypts the plaintext and computes the tag of dst and plaintext.
+// dst and plaintext may fully overlap or not at all. Note that the sequence
+// number will still be incremented on failure, unless the sequence has
+// overflowed.
+func (hc *S2AHalfConnection) Encrypt(dst, plaintext, aad []byte) ([]byte, error) {
+	hc.mutex.Lock()
+	sequence, err := hc.getAndIncrementSequence()
+	if err != nil {
+		hc.mutex.Unlock()
+		return nil, err
+	}
+	nonce := hc.maskedNonce(sequence)
+	crypter := hc.aeadCrypter
+	hc.mutex.Unlock()
+	return crypter.Encrypt(dst, plaintext, nonce, aad)
+}
+
+// Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may
+// fully overlap or not at all. Note that the sequence number will still be
+// incremented on failure, unless the sequence has overflowed.
+func (hc *S2AHalfConnection) Decrypt(dst, ciphertext, aad []byte) ([]byte, error) {
+	hc.mutex.Lock()
+	sequence, err := hc.getAndIncrementSequence()
+	if err != nil {
+		hc.mutex.Unlock()
+		return nil, err
+	}
+	nonce := hc.maskedNonce(sequence)
+	crypter := hc.aeadCrypter
+	hc.mutex.Unlock()
+	return crypter.Decrypt(dst, ciphertext, nonce, aad)
+}
+
+// UpdateKey advances the traffic secret key, as specified in
+// https://tools.ietf.org/html/rfc8446#section-7.2. In addition, it derives
+// a new key and nonce, and resets the sequence number.
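+// Concretely, the new secret is the HKDF expansion of the old secret with
+// the "tls13 traffic upd" label, an empty context, and the hash digest size
+// as the output length (see deriveSecret below).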
+func (hc *S2AHalfConnection) UpdateKey() error {
+	hc.mutex.Lock()
+	defer hc.mutex.Unlock()
+
+	var err error
+	hc.trafficSecret, err = hc.deriveSecret(hc.trafficSecret, []byte(tls13Update), hc.cs.trafficSecretSize())
+	if err != nil {
+		return fmt.Errorf("failed to derive traffic secret: %v", err)
+	}
+
+	if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil {
+		return fmt.Errorf("failed to update half connection: %v", err)
+	}
+
+	hc.sequence.reset()
+	return nil
+}
+
+// TagSize returns the tag size in bytes of the underlying AEAD crypter.
+func (hc *S2AHalfConnection) TagSize() int {
+	return hc.aeadCrypter.TagSize()
+}
+
+// updateCrypterAndNonce takes a new traffic secret and updates the crypter
+// and nonce. Note that the mutex must be held while calling this function.
+func (hc *S2AHalfConnection) updateCrypterAndNonce(newTrafficSecret []byte) error {
+	key, err := hc.deriveSecret(newTrafficSecret, []byte(tls13Key), hc.cs.keySize())
+	if err != nil {
+		return fmt.Errorf("failed to update key: %v", err)
+	}
+
+	hc.nonce, err = hc.deriveSecret(newTrafficSecret, []byte(tls13Nonce), hc.cs.nonceSize())
+	if err != nil {
+		return fmt.Errorf("failed to update nonce: %v", err)
+	}
+
+	hc.aeadCrypter, err = hc.cs.aeadCrypter(key)
+	if err != nil {
+		return fmt.Errorf("failed to update AEAD crypter: %v", err)
+	}
+	return nil
+}
+
+// getAndIncrementSequence returns the current sequence number and increments
+// it. Note that the mutex must be held while calling this function.
+func (hc *S2AHalfConnection) getAndIncrementSequence() (uint64, error) {
+	sequence, err := hc.sequence.value()
+	if err != nil {
+		return 0, err
+	}
+	hc.sequence.increment()
+	return sequence, nil
+}
+
+// maskedNonce creates a copy of the nonce that is masked with the sequence
+// number. Note that the mutex must be held while calling this function.
+func (hc *S2AHalfConnection) maskedNonce(sequence uint64) []byte {
+	const uint64Size = 8
+	nonce := make([]byte, len(hc.nonce))
+	copy(nonce, hc.nonce)
+	for i := 0; i < uint64Size; i++ {
+		nonce[aeadcrypter.NonceSize-uint64Size+i] ^= byte(sequence >> uint64(56-uint64Size*i))
+	}
+	return nonce
+}
+
+// deriveSecret implements the Derive-Secret function, as specified in
+// https://tools.ietf.org/html/rfc8446#section-7.1.
+func (hc *S2AHalfConnection) deriveSecret(secret, label []byte, length int) ([]byte, error) {
+	var hkdfLabel cryptobyte.Builder
+	hkdfLabel.AddUint16(uint16(length))
+	hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+		b.AddBytes(label)
+	})
+	// Append an empty `Context` field to the label, as specified in the RFC.
+	// The half connection does not use the `Context` field.
+	hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
+		b.AddBytes([]byte(""))
+	})
+	hkdfLabelBytes, err := hkdfLabel.Bytes()
+	if err != nil {
+		return nil, fmt.Errorf("deriveSecret failed: %v", err)
+	}
+	return hc.expander.expand(secret, hkdfLabelBytes, length)
+}
diff --git a/vendor/github.com/google/s2a-go/internal/record/record.go b/vendor/github.com/google/s2a-go/internal/record/record.go
new file mode 100644
index 00000000..c6051551
--- /dev/null
+++ b/vendor/github.com/google/s2a-go/internal/record/record.go
@@ -0,0 +1,757 @@
+/*
+ *
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package record implements the TLS 1.3 record protocol used by the S2A +// transport credentials. +package record + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "net" + "sync" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + "github.com/google/s2a-go/internal/record/internal/halfconn" + "github.com/google/s2a-go/internal/tokenmanager" + "google.golang.org/grpc/grpclog" +) + +// recordType is the `ContentType` as described in +// https://tools.ietf.org/html/rfc8446#section-5.1. +type recordType byte + +const ( + alert recordType = 21 + handshake recordType = 22 + applicationData recordType = 23 +) + +// keyUpdateRequest is the `KeyUpdateRequest` as described in +// https://tools.ietf.org/html/rfc8446#section-4.6.3. +type keyUpdateRequest byte + +const ( + updateNotRequested keyUpdateRequest = 0 + updateRequested keyUpdateRequest = 1 +) + +// alertDescription is the `AlertDescription` as described in +// https://tools.ietf.org/html/rfc8446#section-6. +type alertDescription byte + +const ( + closeNotify alertDescription = 0 +) + +// sessionTicketState is used to determine whether session tickets have not yet +// been received, are in the process of being received, or have finished +// receiving. +type sessionTicketState byte + +const ( + ticketsNotYetReceived sessionTicketState = 0 + receivingTickets sessionTicketState = 1 + notReceivingTickets sessionTicketState = 2 +) + +const ( + // The TLS 1.3-specific constants below (tlsRecordMaxPlaintextSize, + // tlsRecordHeaderSize, tlsRecordTypeSize) were taken from + // https://tools.ietf.org/html/rfc8446#section-5.1. + + // tlsRecordMaxPlaintextSize is the maximum size in bytes of the plaintext + // in a single TLS 1.3 record. + tlsRecordMaxPlaintextSize = 16384 // 2^14 + // tlsRecordTypeSize is the size in bytes of the TLS 1.3 record type. + tlsRecordTypeSize = 1 + // tlsTagSize is the size in bytes of the tag of the following three + // ciphersuites: AES-128-GCM-SHA256, AES-256-GCM-SHA384, + // CHACHA20-POLY1305-SHA256. + tlsTagSize = 16 + // tlsRecordMaxPayloadSize is the maximum size in bytes of the payload in a + // single TLS 1.3 record. This is the maximum size of the plaintext plus the + // record type byte and 16 bytes of the tag. + tlsRecordMaxPayloadSize = tlsRecordMaxPlaintextSize + tlsRecordTypeSize + tlsTagSize + // tlsRecordHeaderTypeSize is the size in bytes of the TLS 1.3 record + // header type. + tlsRecordHeaderTypeSize = 1 + // tlsRecordHeaderLegacyRecordVersionSize is the size in bytes of the TLS + // 1.3 record header legacy record version. + tlsRecordHeaderLegacyRecordVersionSize = 2 + // tlsRecordHeaderPayloadLengthSize is the size in bytes of the TLS 1.3 + // record header payload length. + tlsRecordHeaderPayloadLengthSize = 2 + // tlsRecordHeaderSize is the size in bytes of the TLS 1.3 record header. 
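+	// Per the sizes above, this is 1 type byte + 2 legacy version bytes +
+	// 2 payload length bytes = 5 bytes.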
+	tlsRecordHeaderSize = tlsRecordHeaderTypeSize + tlsRecordHeaderLegacyRecordVersionSize + tlsRecordHeaderPayloadLengthSize
+	// tlsRecordMaxSize is the maximum size in bytes of a single TLS 1.3 record.
+	tlsRecordMaxSize = tlsRecordMaxPayloadSize + tlsRecordHeaderSize
+	// tlsApplicationData is the application data type of the TLS 1.3 record
+	// header.
+	tlsApplicationData = 23
+	// tlsLegacyRecordVersion is the legacy record version of the TLS record.
+	tlsLegacyRecordVersion = 3
+	// tlsAlertSize is the size in bytes of a TLS 1.3 alert message.
+	tlsAlertSize = 2
+)
+
+const (
+	// These are TLS 1.3 handshake-specific constants.
+
+	// tlsHandshakeNewSessionTicketType is the prefix of a handshake new session
+	// ticket message of TLS 1.3.
+	tlsHandshakeNewSessionTicketType = 4
+	// tlsHandshakeKeyUpdateType is the prefix of a handshake key update message
+	// of TLS 1.3.
+	tlsHandshakeKeyUpdateType = 24
+	// tlsHandshakeMsgTypeSize is the size in bytes of the TLS 1.3 handshake
+	// message type field.
+	tlsHandshakeMsgTypeSize = 1
+	// tlsHandshakeLengthSize is the size in bytes of the TLS 1.3 handshake
+	// message length field.
+	tlsHandshakeLengthSize = 3
+	// tlsHandshakeKeyUpdateMsgSize is the size in bytes of the TLS 1.3
+	// handshake key update message.
+	tlsHandshakeKeyUpdateMsgSize = 1
+	// tlsHandshakePrefixSize is the size in bytes of the prefix of the TLS 1.3
+	// handshake message.
+	tlsHandshakePrefixSize = 4
+	// tlsMaxSessionTicketSize is the maximum size of a NewSessionTicket message
+	// in TLS 1.3. This is the sum of the max sizes of all the fields in the
+	// NewSessionTicket struct specified in
+	// https://tools.ietf.org/html/rfc8446#section-4.6.1.
+	tlsMaxSessionTicketSize = 131338
+)
+
+const (
+	// outBufMaxRecords is the maximum number of records that can fit in the
+	// outRecordsBuf buffer.
+	outBufMaxRecords = 16
+	// outBufMaxSize is the maximum size (in bytes) of the outRecordsBuf buffer.
+	outBufMaxSize = outBufMaxRecords * tlsRecordMaxSize
+	// maxAllowedTickets is the maximum number of session tickets that are
+	// allowed. The number of tickets is limited to ensure that the size of the
+	// ticket queue does not grow indefinitely. S2A also keeps a limit on the
+	// number of tickets that it caches.
+	maxAllowedTickets = 5
+)
+
+// preConstructedKeyUpdateMsg holds the key update message. This is needed as an
+// optimization so that the same message does not need to be constructed every
+// time a key update message is sent.
+var preConstructedKeyUpdateMsg = buildKeyUpdateRequest()
+
+// conn represents a secured TLS connection. It implements the net.Conn
+// interface.
+type conn struct {
+	net.Conn
+	// inConn is the half connection responsible for decrypting incoming bytes.
+	inConn *halfconn.S2AHalfConnection
+	// outConn is the half connection responsible for encrypting outgoing bytes.
+	outConn *halfconn.S2AHalfConnection
+	// pendingApplicationData holds data that has been read from the connection
+	// and decrypted, but has not yet been returned by Read.
+	pendingApplicationData []byte
+	// unusedBuf holds data read from the network that has not yet been
+	// decrypted. This data might not consist of a complete record. It may
+	// consist of several records, the last of which could be incomplete.
+	unusedBuf []byte
+	// outRecordsBuf is a buffer used to store outgoing TLS records before
+	// they are written to the network.
+	outRecordsBuf []byte
+	// nextRecord aliases into unusedBuf and marks the start of the next record
+	// to be parsed, which may still be incomplete.
+ nextRecord []byte + // overheadSize is the overhead size in bytes of each TLS 1.3 record, which + // is computed as overheadSize = header size + record type byte + tag size. + // Note that there is no padding by zeros in the overhead calculation. + overheadSize int + // readMutex guards against concurrent calls to Read. This is required since + // Close may be called during a Read. + readMutex sync.Mutex + // writeMutex guards against concurrent calls to Write. This is required + // since Close may be called during a Write, and also because a key update + // message may be written during a Read. + writeMutex sync.Mutex + // handshakeBuf holds handshake messages while they are being processed. + handshakeBuf []byte + // ticketState is the current processing state of the session tickets. + ticketState sessionTicketState + // sessionTickets holds the completed session tickets until they are sent to + // the handshaker service for processing. + sessionTickets [][]byte + // ticketSender sends session tickets to the S2A handshaker service. + ticketSender s2aTicketSender + // callComplete is a channel that blocks closing the record protocol until a + // pending call to the S2A completes. + callComplete chan bool +} + +// ConnParameters holds the parameters used for creating a new conn object. +type ConnParameters struct { + // NetConn is the TCP connection to the peer. This parameter is required. + NetConn net.Conn + // Ciphersuite is the TLS ciphersuite negotiated by the S2A handshaker + // service. This parameter is required. + Ciphersuite commonpb.Ciphersuite + // TLSVersion is the TLS version number negotiated by the S2A handshaker + // service. This parameter is required. + TLSVersion commonpb.TLSVersion + // InTrafficSecret is the traffic secret used to derive the session key for + // the inbound direction. This parameter is required. + InTrafficSecret []byte + // OutTrafficSecret is the traffic secret used to derive the session key + // for the outbound direction. This parameter is required. + OutTrafficSecret []byte + // UnusedBuf is the data read from the network that has not yet been + // decrypted. This parameter is optional. If not provided, then no + // application data was sent in the same flight of messages as the final + // handshake message. + UnusedBuf []byte + // InSequence is the sequence number of the next, incoming, TLS record. + // This parameter is required. + InSequence uint64 + // OutSequence is the sequence number of the next, outgoing, TLS record. + // This parameter is required. + OutSequence uint64 + // HSAddr stores the address of the S2A handshaker service. This parameter + // is optional. If not provided, then TLS resumption is disabled. + HSAddr string + // ConnectionId is the connection identifier that was created and sent by + // S2A at the end of a handshake. + ConnectionID uint64 + // LocalIdentity is the local identity that was used by S2A during session + // setup and included in the session result. + LocalIdentity *commonpb.Identity + // EnsureProcessSessionTickets allows users to wait and ensure that all + // available session tickets are sent to S2A before a process completes. + EnsureProcessSessionTickets *sync.WaitGroup +} + +// NewConn creates a TLS record protocol that wraps the TCP connection. 
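+//
+// Illustrative wiring after a successful S2A handshake (editor's sketch;
+// tcpConn, inSecret, and outSecret are placeholders supplied by the caller):
+//
+//	c, err := NewConn(&ConnParameters{
+//		NetConn:          tcpConn,
+//		Ciphersuite:      commonpb.Ciphersuite_AES_128_GCM_SHA256,
+//		TLSVersion:       commonpb.TLSVersion_TLS1_3,
+//		InTrafficSecret:  inSecret,  // 32 bytes for this ciphersuite
+//		OutTrafficSecret: outSecret, // 32 bytes for this ciphersuite
+//		InSequence:       0,
+//		OutSequence:      0,
+//	})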
+func NewConn(o *ConnParameters) (net.Conn, error) { + if o == nil { + return nil, errors.New("conn options must not be nil") + } + if o.TLSVersion != commonpb.TLSVersion_TLS1_3 { + return nil, errors.New("TLS version must be TLS 1.3") + } + + inConn, err := halfconn.New(o.Ciphersuite, o.InTrafficSecret, o.InSequence) + if err != nil { + return nil, fmt.Errorf("failed to create inbound half connection: %v", err) + } + outConn, err := halfconn.New(o.Ciphersuite, o.OutTrafficSecret, o.OutSequence) + if err != nil { + return nil, fmt.Errorf("failed to create outbound half connection: %v", err) + } + + // The tag size for the in/out connections should be the same. + overheadSize := tlsRecordHeaderSize + tlsRecordTypeSize + inConn.TagSize() + var unusedBuf []byte + if o.UnusedBuf == nil { + // We pre-allocate unusedBuf to be of size + // 2*tlsRecordMaxSize-1 during initialization. We only read from the + // network into unusedBuf when unusedBuf does not contain a complete + // record and the incomplete record is at most tlsRecordMaxSize-1 + // (bytes). And we read at most tlsRecordMaxSize bytes of data from the + // network into unusedBuf at one time. Therefore, 2*tlsRecordMaxSize-1 + // is large enough to buffer data read from the network. + unusedBuf = make([]byte, 0, 2*tlsRecordMaxSize-1) + } else { + unusedBuf = make([]byte, len(o.UnusedBuf)) + copy(unusedBuf, o.UnusedBuf) + } + + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + grpclog.Infof("failed to create single token access token manager: %v", err) + } + + s2aConn := &conn{ + Conn: o.NetConn, + inConn: inConn, + outConn: outConn, + unusedBuf: unusedBuf, + outRecordsBuf: make([]byte, tlsRecordMaxSize), + nextRecord: unusedBuf, + overheadSize: overheadSize, + ticketState: ticketsNotYetReceived, + // Pre-allocate the buffer for one session ticket message and the max + // plaintext size. This is the largest size that handshakeBuf will need + // to hold. The largest incomplete handshake message is the + // [handshake header size] + [max session ticket size] - 1. + // Then, tlsRecordMaxPlaintextSize is the maximum size that will be + // appended to the handshakeBuf before the handshake message is + // completed. Therefore, the buffer size below should be large enough to + // buffer any handshake messages. + handshakeBuf: make([]byte, 0, tlsHandshakePrefixSize+tlsMaxSessionTicketSize+tlsRecordMaxPlaintextSize-1), + ticketSender: &ticketSender{ + hsAddr: o.HSAddr, + connectionID: o.ConnectionID, + localIdentity: o.LocalIdentity, + tokenManager: tokenManager, + ensureProcessSessionTickets: o.EnsureProcessSessionTickets, + }, + callComplete: make(chan bool), + } + return s2aConn, nil +} + +// Read reads and decrypts a TLS 1.3 record from the underlying connection, and +// copies any application data received from the peer into b. If the size of the +// payload is greater than len(b), Read retains the remaining bytes in an +// internal buffer, and subsequent calls to Read will read from this buffer +// until it is exhausted. At most 1 TLS record worth of application data is +// written to b for each call to Read. +// +// Note that for the user to efficiently call this method, the user should +// ensure that the buffer b is allocated such that the buffer does not have any +// unused segments. This can be done by calling Read via io.ReadFull, which +// continually calls Read until the specified buffer has been filled. 
Also note
+// that the user should close the connection via Close() if an error is
+// returned by a call to Read.
+func (p *conn) Read(b []byte) (n int, err error) {
+	p.readMutex.Lock()
+	defer p.readMutex.Unlock()
+	// Check if p.pendingApplicationData has leftover application data from
+	// the previous call to Read.
+	if len(p.pendingApplicationData) == 0 {
+		// Read a full record from the wire.
+		record, err := p.readFullRecord()
+		if err != nil {
+			return 0, err
+		}
+		// Now we have a complete record, so split the header and validate it.
+		// The TLS record is split into 2 pieces: the record header and the
+		// payload. The payload has the following form:
+		// [payload] = [ciphertext of application data]
+		//           + [ciphertext of record type byte]
+		//           + [(optionally) ciphertext of padding by zeros]
+		//           + [tag]
+		header, payload, err := splitAndValidateHeader(record)
+		if err != nil {
+			return 0, err
+		}
+		// Decrypt the ciphertext.
+		p.pendingApplicationData, err = p.inConn.Decrypt(payload[:0], payload, header)
+		if err != nil {
+			return 0, err
+		}
+		// Remove the padding by zeros and the record type byte from the
+		// p.pendingApplicationData buffer.
+		msgType, err := p.stripPaddingAndType()
+		if err != nil {
+			return 0, err
+		}
+		// Check that the length of the plaintext after stripping the padding
+		// and record type byte is under the maximum plaintext size.
+		if len(p.pendingApplicationData) > tlsRecordMaxPlaintextSize {
+			return 0, errors.New("plaintext size larger than maximum")
+		}
+		// The expected message types are application data, alert, and
+		// handshake. For application data, the bytes are directly copied into
+		// b. For an alert, the type of the alert is checked and the connection
+		// is closed on a close notify alert. For a handshake message, the
+		// handshake message type is checked. The handshake message type can be
+		// a key update type, for which we advance the traffic secret, and a
+		// new session ticket type, for which we send the received ticket to S2A
+		// for processing.
+		switch msgType {
+		case applicationData:
+			if len(p.handshakeBuf) > 0 {
+				return 0, errors.New("application data received while processing fragmented handshake messages")
+			}
+			if p.ticketState == receivingTickets {
+				p.ticketState = notReceivingTickets
+				grpclog.Infof("Sending session tickets to S2A.")
+				p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete)
+			}
+		case alert:
+			return 0, p.handleAlertMessage()
+		case handshake:
+			if err = p.handleHandshakeMessage(); err != nil {
+				return 0, err
+			}
+			return 0, nil
+		default:
+			return 0, errors.New("unknown record type")
+		}
+	}
+	// Write as much application data as possible to b, the output buffer.
+	n = copy(b, p.pendingApplicationData)
+	p.pendingApplicationData = p.pendingApplicationData[n:]
+	return n, nil
+}
+
+// Write divides b into segments of size tlsRecordMaxPlaintextSize, builds a
+// TLS 1.3 record (of type "application data") from each segment, and sends
+// the record to the peer. It returns the number of plaintext bytes that were
+// successfully sent to the peer.
+func (p *conn) Write(b []byte) (n int, err error) {
+	p.writeMutex.Lock()
+	defer p.writeMutex.Unlock()
+	return p.writeTLSRecord(b, tlsApplicationData)
+}
+
+// writeTLSRecord divides b into segments of size tlsRecordMaxPlaintextSize,
+// builds a TLS 1.3 record (of type recordType) from each segment, and sends
+// the record to the peer. It returns the number of plaintext bytes that were
+// successfully sent to the peer.
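+//
+// For example, with a 16-byte AEAD tag, each full record carries
+// tlsRecordMaxPlaintextSize (16384) bytes of plaintext plus 22 bytes of
+// overhead: the 5-byte header, the record type byte, and the tag.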
+func (p *conn) writeTLSRecord(b []byte, recordType byte) (n int, err error) { + // Create a record of only header, record type, and tag if given empty + // byte array. + if len(b) == 0 { + recordEndIndex, _, err := p.buildRecord(b, recordType, 0) + if err != nil { + return 0, err + } + + // Write the bytes stored in outRecordsBuf to p.Conn. Since we return + // the number of plaintext bytes written without overhead, we will + // always return 0 while p.Conn.Write returns the entire record length. + _, err = p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) + return 0, err + } + + numRecords := int(math.Ceil(float64(len(b)) / float64(tlsRecordMaxPlaintextSize))) + totalRecordsSize := len(b) + numRecords*p.overheadSize + partialBSize := len(b) + if totalRecordsSize > outBufMaxSize { + totalRecordsSize = outBufMaxSize + partialBSize = outBufMaxRecords * tlsRecordMaxPlaintextSize + } + if len(p.outRecordsBuf) < totalRecordsSize { + p.outRecordsBuf = make([]byte, totalRecordsSize) + } + for bStart := 0; bStart < len(b); bStart += partialBSize { + bEnd := bStart + partialBSize + if bEnd > len(b) { + bEnd = len(b) + } + partialB := b[bStart:bEnd] + recordEndIndex := 0 + for len(partialB) > 0 { + recordEndIndex, partialB, err = p.buildRecord(partialB, recordType, recordEndIndex) + if err != nil { + // Return the amount of bytes written prior to the error. + return bStart, err + } + } + // Write the bytes stored in outRecordsBuf to p.Conn. If there is an + // error, calculate the total number of plaintext bytes of complete + // records successfully written to the peer and return it. + nn, err := p.Conn.Write(p.outRecordsBuf[:recordEndIndex]) + if err != nil { + numberOfCompletedRecords := int(math.Floor(float64(nn) / float64(tlsRecordMaxSize))) + return bStart + numberOfCompletedRecords*tlsRecordMaxPlaintextSize, err + } + } + return len(b), nil +} + +// buildRecord builds a TLS 1.3 record of type recordType from plaintext, +// and writes the record to outRecordsBuf at recordStartIndex. The record will +// have at most tlsRecordMaxPlaintextSize bytes of payload. It returns the +// index of outRecordsBuf where the current record ends, as well as any +// remaining plaintext bytes. +func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex int) (n int, remainingPlaintext []byte, err error) { + // Construct the payload, which consists of application data and record type. + dataLen := len(plaintext) + if dataLen > tlsRecordMaxPlaintextSize { + dataLen = tlsRecordMaxPlaintextSize + } + remainingPlaintext = plaintext[dataLen:] + newRecordBuf := p.outRecordsBuf[recordStartIndex:] + + copy(newRecordBuf[tlsRecordHeaderSize:], plaintext[:dataLen]) + newRecordBuf[tlsRecordHeaderSize+dataLen] = recordType + payload := newRecordBuf[tlsRecordHeaderSize : tlsRecordHeaderSize+dataLen+1] // 1 is for the recordType. + // Construct the header. + newRecordBuf[0] = tlsApplicationData + newRecordBuf[1] = tlsLegacyRecordVersion + newRecordBuf[2] = tlsLegacyRecordVersion + binary.BigEndian.PutUint16(newRecordBuf[3:], uint16(len(payload)+tlsTagSize)) + header := newRecordBuf[:tlsRecordHeaderSize] + + // Encrypt the payload using header as aad. 
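+	// Passing newRecordBuf[tlsRecordHeaderSize:][:0] as dst below makes the
+	// half connection write the ciphertext and tag in place over the staged
+	// payload, so no additional buffer is allocated.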
+ encryptedPayload, err := p.outConn.Encrypt(newRecordBuf[tlsRecordHeaderSize:][:0], payload, header) + if err != nil { + return 0, plaintext, err + } + recordStartIndex += len(header) + len(encryptedPayload) + return recordStartIndex, remainingPlaintext, nil +} + +func (p *conn) Close() error { + p.readMutex.Lock() + defer p.readMutex.Unlock() + p.writeMutex.Lock() + defer p.writeMutex.Unlock() + // If p.ticketState is equal to notReceivingTickets, then S2A has + // been sent a flight of session tickets, and we must wait for the + // call to S2A to complete before closing the record protocol. + if p.ticketState == notReceivingTickets { + <-p.callComplete + grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.") + } + return p.Conn.Close() +} + +// stripPaddingAndType strips the padding by zeros and record type from +// p.pendingApplicationData and returns the record type. Note that +// p.pendingApplicationData should be of the form: +// [application data] + [record type byte] + [trailing zeros] +func (p *conn) stripPaddingAndType() (recordType, error) { + if len(p.pendingApplicationData) == 0 { + return 0, errors.New("application data had length 0") + } + i := len(p.pendingApplicationData) - 1 + // Search for the index of the record type byte. + for i > 0 { + if p.pendingApplicationData[i] != 0 { + break + } + i-- + } + rt := recordType(p.pendingApplicationData[i]) + p.pendingApplicationData = p.pendingApplicationData[:i] + return rt, nil +} + +// readFullRecord reads from the wire until a record is completed and returns +// the full record. +func (p *conn) readFullRecord() (fullRecord []byte, err error) { + fullRecord, p.nextRecord, err = parseReadBuffer(p.nextRecord, tlsRecordMaxPayloadSize) + if err != nil { + return nil, err + } + // Check whether the next record to be decrypted has been completely + // received. + if len(fullRecord) == 0 { + copy(p.unusedBuf, p.nextRecord) + p.unusedBuf = p.unusedBuf[:len(p.nextRecord)] + // Always copy next incomplete record to the beginning of the + // unusedBuf buffer and reset nextRecord to it. + p.nextRecord = p.unusedBuf + } + // Keep reading from the wire until we have a complete record. + for len(fullRecord) == 0 { + if len(p.unusedBuf) == cap(p.unusedBuf) { + tmp := make([]byte, len(p.unusedBuf), cap(p.unusedBuf)+tlsRecordMaxSize) + copy(tmp, p.unusedBuf) + p.unusedBuf = tmp + } + n, err := p.Conn.Read(p.unusedBuf[len(p.unusedBuf):min(cap(p.unusedBuf), len(p.unusedBuf)+tlsRecordMaxSize)]) + if err != nil { + return nil, err + } + p.unusedBuf = p.unusedBuf[:len(p.unusedBuf)+n] + fullRecord, p.nextRecord, err = parseReadBuffer(p.unusedBuf, tlsRecordMaxPayloadSize) + if err != nil { + return nil, err + } + } + return fullRecord, nil +} + +// parseReadBuffer parses the provided buffer and returns a full record and any +// remaining bytes in that buffer. If the record is incomplete, nil is returned +// for the first return value and the given byte buffer is returned for the +// second return value. The length of the payload specified by the header should +// not be greater than maxLen, otherwise an error is returned. Note that this +// function does not allocate or copy any buffers. +func parseReadBuffer(b []byte, maxLen uint16) (fullRecord, remaining []byte, err error) { + // If the header is not complete, return the provided buffer as remaining + // buffer. 
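+	// Illustrative example: a buffer holding only the 5-byte header
+	// [23, 3, 3, 0, 17] (type, legacy version, payload length 17) is returned
+	// unchanged as the remaining buffer until all 17 payload bytes arrive.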
+	if len(b) < tlsRecordHeaderSize {
+		return nil, b, nil
+	}
+	msgLenField := b[tlsRecordHeaderTypeSize+tlsRecordHeaderLegacyRecordVersionSize : tlsRecordHeaderSize]
+	length := binary.BigEndian.Uint16(msgLenField)
+	if length > maxLen {
+		return nil, nil, fmt.Errorf("record length larger than the limit %d", maxLen)
+	}
+	if len(b) < int(length)+tlsRecordHeaderSize {
+		// Record is not complete yet.
+		return nil, b, nil
+	}
+	return b[:tlsRecordHeaderSize+length], b[tlsRecordHeaderSize+length:], nil
+}
+
+// splitAndValidateHeader splits the header from the payload in the TLS 1.3
+// record and returns them. Note that the header is checked for validity, and an
+// error is returned when an invalid header is parsed. Also note that this
+// function does not allocate or copy any buffers.
+func splitAndValidateHeader(record []byte) (header, payload []byte, err error) {
+	if len(record) < tlsRecordHeaderSize {
+		return nil, nil, fmt.Errorf("record was smaller than the header size")
+	}
+	header = record[:tlsRecordHeaderSize]
+	payload = record[tlsRecordHeaderSize:]
+	if header[0] != tlsApplicationData {
+		return nil, nil, fmt.Errorf("incorrect type in the header")
+	}
+	// Check the legacy record version, which should be 0x03, 0x03.
+	if header[1] != 0x03 || header[2] != 0x03 {
+		return nil, nil, fmt.Errorf("incorrect legacy record version in the header")
+	}
+	return header, payload, nil
+}
+
+// handleAlertMessage handles an alert message.
+func (p *conn) handleAlertMessage() error {
+	if len(p.pendingApplicationData) != tlsAlertSize {
+		return errors.New("invalid alert message size")
+	}
+	alertType := p.pendingApplicationData[1]
+	// Clear the body of the alert message.
+	p.pendingApplicationData = p.pendingApplicationData[:0]
+	if alertType == byte(closeNotify) {
+		return errors.New("received a close notify alert")
+	}
+	// TODO(matthewstevenson88): Add support for more alert types.
+	return fmt.Errorf("received an unrecognized alert type: %v", alertType)
+}
+
+// parseHandshakeMsg parses a handshake message from the handshake buffer.
+// It returns the message type, the message length, the message, the raw message
+// that includes the type and length bytes, and a flag indicating whether the
+// handshake message has been fully parsed, i.e. whether the entire handshake
+// message was in the handshake buffer.
+func (p *conn) parseHandshakeMsg() (msgType byte, msgLen uint32, msg []byte, rawMsg []byte, ok bool) {
+	// Handle the case where the 4 byte handshake header is fragmented.
+	if len(p.handshakeBuf) < tlsHandshakePrefixSize {
+		return 0, 0, nil, nil, false
+	}
+	msgType = p.handshakeBuf[0]
+	msgLen = bigEndianInt24(p.handshakeBuf[tlsHandshakeMsgTypeSize : tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize])
+	if msgLen > uint32(len(p.handshakeBuf)-tlsHandshakePrefixSize) {
+		return 0, 0, nil, nil, false
+	}
+	msg = p.handshakeBuf[tlsHandshakePrefixSize : tlsHandshakePrefixSize+msgLen]
+	rawMsg = p.handshakeBuf[:tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize+msgLen]
+	p.handshakeBuf = p.handshakeBuf[tlsHandshakePrefixSize+msgLen:]
+	return msgType, msgLen, msg, rawMsg, true
+}
+
+// handleHandshakeMessage handles a handshake message. Note that the first
+// complete handshake message from the handshake buffer is removed, if it
+// exists.
+func (p *conn) handleHandshakeMessage() error {
+	// Copy the pending application data to the handshake buffer. At this point,
+	// we are guaranteed that the pending application data contains only parts
+	// of a handshake message.
+
+// handleHandshakeMessage handles a handshake message. Note that the first
+// complete handshake message from the handshake buffer is removed, if it
+// exists.
+func (p *conn) handleHandshakeMessage() error {
+	// Copy the pending application data to the handshake buffer. At this point,
+	// we are guaranteed that the pending application data contains only parts
+	// of a handshake message.
+	p.handshakeBuf = append(p.handshakeBuf, p.pendingApplicationData...)
+	p.pendingApplicationData = p.pendingApplicationData[:0]
+	// Several handshake messages may be coalesced into a single record.
+	// Continue reading them until the handshake buffer is empty.
+	for len(p.handshakeBuf) > 0 {
+		handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg()
+		if !ok {
+			// The handshake could not be fully parsed, so read in another
+			// record and try again later.
+			break
+		}
+		switch handshakeMsgType {
+		case tlsHandshakeKeyUpdateType:
+			if msgLen != tlsHandshakeKeyUpdateMsgSize {
+				return errors.New("invalid handshake key update message length")
+			}
+			if len(p.handshakeBuf) != 0 {
+				return errors.New("key update message must be the last message of a handshake record")
+			}
+			if err := p.handleKeyUpdateMsg(msg); err != nil {
+				return err
+			}
+		case tlsHandshakeNewSessionTicketType:
+			// Ignore tickets that are received after a batch of tickets has
+			// been sent to S2A.
+			if p.ticketState == notReceivingTickets {
+				continue
+			}
+			if p.ticketState == ticketsNotYetReceived {
+				p.ticketState = receivingTickets
+			}
+			p.sessionTickets = append(p.sessionTickets, rawMsg)
+			if len(p.sessionTickets) == maxAllowedTickets {
+				p.ticketState = notReceivingTickets
+				grpclog.Infof("Sending session tickets to S2A.")
+				p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete)
+			}
+		default:
+			return errors.New("unknown handshake message type")
+		}
+	}
+	return nil
+}
+
+func buildKeyUpdateRequest() []byte {
+	b := make([]byte, tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize)
+	b[0] = tlsHandshakeKeyUpdateType
+	b[1] = 0
+	b[2] = 0
+	b[3] = tlsHandshakeKeyUpdateMsgSize
+	b[4] = byte(updateNotRequested)
+	return b
+}
+
+// handleKeyUpdateMsg handles a key update message.
+func (p *conn) handleKeyUpdateMsg(msg []byte) error {
+	keyUpdateRequest := msg[0]
+	if keyUpdateRequest != byte(updateNotRequested) &&
+		keyUpdateRequest != byte(updateRequested) {
+		return errors.New("invalid handshake key update message")
+	}
+	if err := p.inConn.UpdateKey(); err != nil {
+		return err
+	}
+	// Send a key update message back to the peer if requested.
+	if keyUpdateRequest == byte(updateRequested) {
+		p.writeMutex.Lock()
+		defer p.writeMutex.Unlock()
+		n, err := p.writeTLSRecord(preConstructedKeyUpdateMsg, byte(handshake))
+		if err != nil {
+			return err
+		}
+		if n != tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize {
+			return errors.New("key update request message wrote fewer bytes than expected")
+		}
+		if err = p.outConn.UpdateKey(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// bigEndianInt24 converts the first 3 bytes of the given byte buffer into
+// the resulting 24 bit integer, returned as a uint32. This is needed because
+// TLS 1.3 requires 3 byte integers, and the binary.BigEndian package does
+// not provide a way to transform a byte buffer into a 3 byte integer.
+func bigEndianInt24(b []byte) uint32 { + _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 + return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16 +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go new file mode 100644 index 00000000..33fa3c55 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package record + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/google/s2a-go/internal/handshaker/service" + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto" + "github.com/google/s2a-go/internal/tokenmanager" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" +) + +// sessionTimeout is the timeout for creating a session with the S2A handshaker +// service. +const sessionTimeout = time.Second * 5 + +// s2aTicketSender sends session tickets to the S2A handshaker service. +type s2aTicketSender interface { + // sendTicketsToS2A sends the given session tickets to the S2A handshaker + // service. + sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) +} + +// ticketStream is the stream used to send and receive session information. +type ticketStream interface { + Send(*s2apb.SessionReq) error + Recv() (*s2apb.SessionResp, error) +} + +type ticketSender struct { + // hsAddr stores the address of the S2A handshaker service. + hsAddr string + // connectionID is the connection identifier that was created and sent by + // S2A at the end of a handshake. + connectionID uint64 + // localIdentity is the local identity that was used by S2A during session + // setup and included in the session result. + localIdentity *commonpb.Identity + // tokenManager manages access tokens for authenticating to S2A. + tokenManager tokenmanager.AccessTokenManager + // ensureProcessSessionTickets allows users to wait and ensure that all + // available session tickets are sent to S2A before a process completes. + ensureProcessSessionTickets *sync.WaitGroup +} + +// sendTicketsToS2A sends the given sessionTickets to the S2A handshaker +// service. This is done asynchronously and writes to the error logs if an error +// occurs. +func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) { + // Note that the goroutine is in the function rather than at the caller + // because the fake ticket sender used for testing must run synchronously + // so that the session tickets can be accessed from it after the tests have + // been run. 
+ if t.ensureProcessSessionTickets != nil { + t.ensureProcessSessionTickets.Add(1) + } + go func() { + if err := func() error { + defer func() { + if t.ensureProcessSessionTickets != nil { + t.ensureProcessSessionTickets.Done() + } + }() + hsConn, err := service.Dial(t.hsAddr) + if err != nil { + return err + } + client := s2apb.NewS2AServiceClient(hsConn) + ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout) + defer cancel() + session, err := client.SetUpSession(ctx) + if err != nil { + return err + } + defer func() { + if err := session.CloseSend(); err != nil { + grpclog.Error(err) + } + }() + return t.writeTicketsToStream(session, sessionTickets) + }(); err != nil { + grpclog.Errorf("failed to send resumption tickets to S2A with identity: %v, %v", + t.localIdentity, err) + } + callComplete <- true + close(callComplete) + }() +} + +// writeTicketsToStream writes the given session tickets to the given stream. +func (t *ticketSender) writeTicketsToStream(stream ticketStream, sessionTickets [][]byte) error { + if err := stream.Send( + &s2apb.SessionReq{ + ReqOneof: &s2apb.SessionReq_ResumptionTicket{ + ResumptionTicket: &s2apb.ResumptionTicketReq{ + InBytes: sessionTickets, + ConnectionId: t.connectionID, + LocalIdentity: t.localIdentity, + }, + }, + AuthMechanisms: t.getAuthMechanisms(), + }, + ); err != nil { + return err + } + sessionResp, err := stream.Recv() + if err != nil { + return err + } + if sessionResp.GetStatus().GetCode() != uint32(codes.OK) { + return fmt.Errorf("s2a session ticket response had error status: %v, %v", + sessionResp.GetStatus().GetCode(), sessionResp.GetStatus().GetDetails()) + } + return nil +} + +func (t *ticketSender) getAuthMechanisms() []*s2apb.AuthenticationMechanism { + if t.tokenManager == nil { + return nil + } + // First handle the special case when no local identity has been provided + // by the application. In this case, an AuthenticationMechanism with no local + // identity will be sent. + if t.localIdentity == nil { + token, err := t.tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("unable to get token for empty local identity: %v", err) + return nil + } + return []*s2apb.AuthenticationMechanism{ + { + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } + } + + // Next, handle the case where the application (or the S2A) has specified + // a local identity. + token, err := t.tokenManager.Token(t.localIdentity) + if err != nil { + grpclog.Infof("unable to get token for local identity %v: %v", t.localIdentity, err) + return nil + } + return []*s2apb.AuthenticationMechanism{ + { + Identity: t.localIdentity, + MechanismOneof: &s2apb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } +} diff --git a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go new file mode 100644 index 00000000..ec96ba3b --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
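writeTicketsToStream above depends only on the small ticketStream interface, so it can be exercised without a live S2A service, which is what the note in sendTicketsToS2A about the synchronous fake ticket sender alludes to. A hedged sketch of such a test double, assuming the same s2apb and codes imports as the file above (the type name and the response shape are illustrative, not part of this package):

```go
// fakeTicketStream is a hypothetical in-memory ticketStream for tests:
// Send records each request, and Recv always reports an OK status so
// that writeTicketsToStream returns nil.
type fakeTicketStream struct {
	sent []*s2apb.SessionReq
}

func (f *fakeTicketStream) Send(req *s2apb.SessionReq) error {
	f.sent = append(f.sent, req)
	return nil
}

func (f *fakeTicketStream) Recv() (*s2apb.SessionResp, error) {
	return &s2apb.SessionResp{
		Status: &s2apb.SessionStatus{Code: uint32(codes.OK)},
	}, nil
}
```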
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tokenmanager provides tokens for authenticating to S2A. +package tokenmanager + +import ( + "fmt" + "os" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" +) + +const ( + s2aAccessTokenEnvironmentVariable = "S2A_ACCESS_TOKEN" +) + +// AccessTokenManager manages tokens for authenticating to S2A. +type AccessTokenManager interface { + // DefaultToken returns a token that an application with no specified local + // identity must use to authenticate to S2A. + DefaultToken() (token string, err error) + // Token returns a token that an application with local identity equal to + // identity must use to authenticate to S2A. + Token(identity *commonpb.Identity) (token string, err error) +} + +type singleTokenAccessTokenManager struct { + token string +} + +// NewSingleTokenAccessTokenManager returns a new AccessTokenManager instance +// that will always manage the same token. +// +// The token to be managed is read from the s2aAccessTokenEnvironmentVariable +// environment variable. If this environment variable is not set, then this +// function returns an error. +func NewSingleTokenAccessTokenManager() (AccessTokenManager, error) { + token, variableExists := os.LookupEnv(s2aAccessTokenEnvironmentVariable) + if !variableExists { + return nil, fmt.Errorf("%s environment variable is not set", s2aAccessTokenEnvironmentVariable) + } + return &singleTokenAccessTokenManager{token: token}, nil +} + +// DefaultToken always returns the token managed by the +// singleTokenAccessTokenManager. +func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) { + return m.token, nil +} + +// Token always returns the token managed by the singleTokenAccessTokenManager. +func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) { + return m.token, nil +} diff --git a/vendor/github.com/google/s2a-go/internal/v2/README.md b/vendor/github.com/google/s2a-go/internal/v2/README.md new file mode 100644 index 00000000..3806d1e9 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/README.md @@ -0,0 +1 @@ +**This directory has the implementation of the S2Av2's gRPC-Go client libraries** diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go b/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go new file mode 100644 index 00000000..cc811879 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go @@ -0,0 +1,122 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package certverifier offloads verifications to S2Av2. 
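The singleTokenAccessTokenManager above sources its token from a single environment variable, so using it is just a matter of exporting that variable before the process starts. A minimal usage sketch (note that tokenmanager is internal to the s2a-go module, so this import only compiles from inside it; the token value is a placeholder):

```go
package main

import (
	"fmt"
	"os"

	"github.com/google/s2a-go/internal/tokenmanager"
)

func main() {
	// Set here only to keep the example self-contained; in practice the
	// variable is exported in the process environment.
	os.Setenv("S2A_ACCESS_TOKEN", "example-token")
	tm, err := tokenmanager.NewSingleTokenAccessTokenManager()
	if err != nil {
		fmt.Println("no token available:", err)
		return
	}
	// DefaultToken and Token(identity) both return the same managed token.
	token, _ := tm.DefaultToken()
	fmt.Println("token:", token)
}
```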
+package certverifier + +import ( + "crypto/x509" + "fmt" + + "github.com/google/s2a-go/stream" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +// VerifyClientCertificateChain builds a SessionReq, sends it to S2Av2 and +// receives a SessionResp. +func VerifyClientCertificateChain(verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + // Offload verification to S2Av2. + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for client peer cert chain validation.") + } + if err := s2AStream.Send(&s2av2pb.SessionReq{ + ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ + ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ + Mode: verificationMode, + PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer_{ + ClientPeer: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer{ + CertificateChain: rawCerts, + }, + }, + }, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for client peer cert chain validation.") + return err + } + + // Get the response from S2Av2. + resp, err := s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive client peer cert chain validation response from S2Av2.") + return err + } + + // Parse the response. + if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return fmt.Errorf("failed to offload client cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + + } + + if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { + return fmt.Errorf("client cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) + } + + return nil + } +} + +// VerifyServerCertificateChain builds a SessionReq, sends it to S2Av2 and +// receives a SessionResp. +func VerifyServerCertificateChain(hostname string, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream, serverAuthorizationPolicy []byte) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { + // Offload verification to S2Av2. + if grpclog.V(1) { + grpclog.Infof("Sending request to S2Av2 for server peer cert chain validation.") + } + if err := s2AStream.Send(&s2av2pb.SessionReq{ + ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{ + ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{ + Mode: verificationMode, + PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer_{ + ServerPeer: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer{ + CertificateChain: rawCerts, + ServerHostname: hostname, + SerializedUnrestrictedClientPolicy: serverAuthorizationPolicy, + }, + }, + }, + }, + }); err != nil { + grpclog.Infof("Failed to send request to S2Av2 for server peer cert chain validation.") + return err + } + + // Get the response from S2Av2. + resp, err := s2AStream.Recv() + if err != nil { + grpclog.Infof("Failed to receive server peer cert chain validation response from S2Av2.") + return err + } + + // Parse the response. 
+ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return fmt.Errorf("failed to offload server cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS { + return fmt.Errorf("server cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails) + } + + return nil + } +} diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der new file mode 100644 index 00000000..958f3cfa Binary files /dev/null and b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der new file mode 100644 index 00000000..d2817641 Binary files /dev/null and b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der new file mode 100644 index 00000000..d8c3710c Binary files /dev/null and b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der new file mode 100644 index 00000000..dae619c0 Binary files /dev/null and b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der new file mode 100644 index 00000000..ce7f8d31 Binary files /dev/null and b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der new file mode 100644 index 00000000..04b0d736 Binary files /dev/null and b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go new file mode 100644 index 00000000..e7478d43 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go @@ -0,0 +1,186 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
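Both certverifier constructors above return closures whose signature matches tls.Config.VerifyPeerCertificate, which is how the rest of the library wires them in. A minimal sketch of that wiring; s2AStream is assumed to be an already-established stream.S2AStream, and the hostname and SPIFFE verification mode are illustrative choices:

```go
// Sketch: delegate server-certificate verification to S2Av2. Local
// verification is disabled because the callback below performs the
// check by querying S2Av2 instead.
cfg := &tls.Config{
	InsecureSkipVerify: true,
	VerifyPeerCertificate: certverifier.VerifyServerCertificateChain(
		"example.com", // hypothetical server hostname
		s2av2pb.ValidatePeerCertificateChainReq_SPIFFE,
		s2AStream, // assumed to be in scope
		nil,       // no serialized authorization policy
	),
}
_ = cfg
```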
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package remotesigner offloads private key operations to S2Av2.
+package remotesigner
+
+import (
+	"crypto"
+	"crypto/rsa"
+	"crypto/x509"
+	"fmt"
+	"io"
+
+	"github.com/google/s2a-go/stream"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+
+	s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
+)
+
+// remoteSigner implements the crypto.Signer interface.
+type remoteSigner struct {
+	leafCert  *x509.Certificate
+	s2AStream stream.S2AStream
+}
+
+// New returns an instance of remoteSigner, an implementation of the
+// crypto.Signer interface.
+func New(leafCert *x509.Certificate, s2AStream stream.S2AStream) crypto.Signer {
+	return &remoteSigner{leafCert, s2AStream}
+}
+
+func (s *remoteSigner) Public() crypto.PublicKey {
+	return s.leafCert.PublicKey
+}
+
+func (s *remoteSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {
+	signatureAlgorithm, err := getSignatureAlgorithm(opts, s.leafCert)
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := getSignReq(signatureAlgorithm, digest)
+	if err != nil {
+		return nil, err
+	}
+	if grpclog.V(1) {
+		grpclog.Infof("Sending request to S2Av2 for signing operation.")
+	}
+	if err := s.s2AStream.Send(&s2av2pb.SessionReq{
+		ReqOneof: &s2av2pb.SessionReq_OffloadPrivateKeyOperationReq{
+			OffloadPrivateKeyOperationReq: req,
+		},
+	}); err != nil {
+		grpclog.Infof("Failed to send request to S2Av2 for signing operation.")
+		return nil, err
+	}
+
+	resp, err := s.s2AStream.Recv()
+	if err != nil {
+		grpclog.Infof("Failed to receive signing operation response from S2Av2.")
+		return nil, err
+	}
+
+	if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) {
+		return nil, fmt.Errorf("failed to offload signing with private key to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details)
+	}
+
+	return resp.GetOffloadPrivateKeyOperationResp().GetOutBytes(), nil
+}
+
+// getCert returns the leafCert field in s.
+func (s *remoteSigner) getCert() *x509.Certificate {
+	return s.leafCert
+}
+
+// getStream returns the s2AStream field in s.
+func (s *remoteSigner) getStream() stream.S2AStream { + return s.s2AStream +} + +func getSignReq(signatureAlgorithm s2av2pb.SignatureAlgorithm, digest []byte) (*s2av2pb.OffloadPrivateKeyOperationReq, error) { + if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256) { + return &s2av2pb.OffloadPrivateKeyOperationReq{ + Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, + SignatureAlgorithm: signatureAlgorithm, + InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha256Digest{ + Sha256Digest: digest, + }, + }, nil + } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384) { + return &s2av2pb.OffloadPrivateKeyOperationReq{ + Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, + SignatureAlgorithm: signatureAlgorithm, + InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha384Digest{ + Sha384Digest: digest, + }, + }, nil + } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519) { + return &s2av2pb.OffloadPrivateKeyOperationReq{ + Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN, + SignatureAlgorithm: signatureAlgorithm, + InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha512Digest{ + Sha512Digest: digest, + }, + }, nil + } else { + return nil, fmt.Errorf("unknown signature algorithm: %v", signatureAlgorithm) + } +} + +// getSignatureAlgorithm returns the signature algorithm that S2A must use when +// performing a signing operation that has been offloaded by an application +// using the crypto/tls libraries. 
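Because New above returns a crypto.Signer, the remote signer can stand in as the PrivateKey of a tls.Certificate; during a handshake, crypto/tls calls Sign and the digest is signed by S2Av2 rather than by a locally held key. A sketch, assuming leafDER (a DER-encoded leaf certificate) and an established s2AStream are already in scope:

```go
// Sketch: back a tls.Certificate with the S2Av2 remote signer.
leaf, err := x509.ParseCertificate(leafDER)
if err != nil {
	// handle the parse error
}
cert := tls.Certificate{
	Certificate: [][]byte{leafDER},
	Leaf:        leaf,
	// crypto/tls invokes PrivateKey.Sign; the remote signer forwards
	// the digest to S2Av2 as an OffloadPrivateKeyOperationReq.
	PrivateKey: remotesigner.New(leaf, s2AStream),
}
_ = cert
```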
+func getSignatureAlgorithm(opts crypto.SignerOpts, leafCert *x509.Certificate) (s2av2pb.SignatureAlgorithm, error) { + if opts == nil || leafCert == nil { + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } + switch leafCert.PublicKeyAlgorithm { + case x509.RSA: + if rsaPSSOpts, ok := opts.(*rsa.PSSOptions); ok { + return rsaPSSAlgorithm(rsaPSSOpts) + } + return rsaPPKCS1Algorithm(opts) + case x509.ECDSA: + return ecdsaAlgorithm(opts) + case x509.Ed25519: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm: %q", leafCert.PublicKeyAlgorithm) + } +} + +func rsaPSSAlgorithm(opts *rsa.PSSOptions) (s2av2pb.SignatureAlgorithm, error) { + switch opts.HashFunc() { + case crypto.SHA256: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256, nil + case crypto.SHA384: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384, nil + case crypto.SHA512: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } +} + +func rsaPPKCS1Algorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, error) { + switch opts.HashFunc() { + case crypto.SHA256: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256, nil + case crypto.SHA384: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384, nil + case crypto.SHA512: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } +} + +func ecdsaAlgorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, error) { + switch opts.HashFunc() { + case crypto.SHA256: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256, nil + case crypto.SHA384: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384, nil + case crypto.SHA512: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512, nil + default: + return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm") + } +} diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der new file mode 100644 index 00000000..d8c3710c Binary files /dev/null and b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem new file mode 100644 index 00000000..493a5a26 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC 
+AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem new file mode 100644 index 00000000..55a7f10c --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der new file mode 100644 index 00000000..04b0d736 Binary files /dev/null and b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der differ diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem new file mode 100644 index 00000000..0f98322c --- /dev/null +++ 
b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl +AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem new file mode 100644 index 00000000..81afea78 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO +QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz 
+7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go new file mode 100644 index 00000000..ff172883 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go @@ -0,0 +1,354 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package v2 provides the S2Av2 transport credentials used by a gRPC +// application. +package v2 + +import ( + "context" + "crypto/tls" + "errors" + "net" + "os" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/s2a-go/fallback" + "github.com/google/s2a-go/internal/handshaker/service" + "github.com/google/s2a-go/internal/tokenmanager" + "github.com/google/s2a-go/internal/v2/tlsconfigstore" + "github.com/google/s2a-go/stream" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +const ( + s2aSecurityProtocol = "tls" + defaultS2ATimeout = 3 * time.Second +) + +// An environment variable, which sets the timeout enforced on the connection to the S2A service for handshake. +const s2aTimeoutEnv = "S2A_TIMEOUT" + +type s2av2TransportCreds struct { + info *credentials.ProtocolInfo + isClient bool + serverName string + s2av2Address string + tokenManager *tokenmanager.AccessTokenManager + // localIdentity should only be used by the client. + localIdentity *commonpbv1.Identity + // localIdentities should only be used by the server. + localIdentities []*commonpbv1.Identity + verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode + fallbackClientHandshake fallback.ClientHandshake + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) + serverAuthorizationPolicy []byte +} + +// NewClientCreds returns a client-side transport credentials object that uses +// the S2Av2 to establish a secure connection with a server. +func NewClientCreds(s2av2Address string, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) { + // Create an AccessTokenManager instance to use to authenticate to S2Av2. 
+ accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + + creds := &s2av2TransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + isClient: true, + serverName: "", + s2av2Address: s2av2Address, + localIdentity: localIdentity, + verificationMode: verificationMode, + fallbackClientHandshake: fallbackClientHandshakeFunc, + getS2AStream: getS2AStream, + serverAuthorizationPolicy: serverAuthorizationPolicy, + } + if err != nil { + creds.tokenManager = nil + } else { + creds.tokenManager = &accessTokenManager + } + if grpclog.V(1) { + grpclog.Info("Created client S2Av2 transport credentials.") + } + return creds, nil +} + +// NewServerCreds returns a server-side transport credentials object that uses +// the S2Av2 to establish a secure connection with a client. +func NewServerCreds(s2av2Address string, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) { + // Create an AccessTokenManager instance to use to authenticate to S2Av2. + accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + creds := &s2av2TransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + isClient: false, + s2av2Address: s2av2Address, + localIdentities: localIdentities, + verificationMode: verificationMode, + getS2AStream: getS2AStream, + } + if err != nil { + creds.tokenManager = nil + } else { + creds.tokenManager = &accessTokenManager + } + if grpclog.V(1) { + grpclog.Info("Created server S2Av2 transport credentials.") + } + return creds, nil +} + +// ClientHandshake performs a client-side mTLS handshake using the S2Av2. +func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if !c.isClient { + return nil, nil, errors.New("client handshake called using server transport credentials") + } + // Remove the port from serverAuthority. 
+	serverName := removeServerNamePort(serverAuthority)
+	timeoutCtx, cancel := context.WithTimeout(ctx, GetS2ATimeout())
+	defer cancel()
+	s2AStream, err := createStream(timeoutCtx, c.s2av2Address, c.getS2AStream)
+	if err != nil {
+		grpclog.Infof("Failed to connect to S2Av2: %v", err)
+		if c.fallbackClientHandshake != nil {
+			return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err)
+		}
+		return nil, nil, err
+	}
+	defer s2AStream.CloseSend()
+	if grpclog.V(1) {
+		grpclog.Infof("Connected to S2Av2.")
+	}
+	var config *tls.Config
+
+	var tokenManager tokenmanager.AccessTokenManager
+	if c.tokenManager == nil {
+		tokenManager = nil
+	} else {
+		tokenManager = *c.tokenManager
+	}
+
+	if c.serverName == "" {
+		config, err = tlsconfigstore.GetTLSConfigurationForClient(serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy)
+		if err != nil {
+			grpclog.Infof("Failed to get client TLS config from S2Av2: %v", err)
+			if c.fallbackClientHandshake != nil {
+				return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err)
+			}
+			return nil, nil, err
+		}
+	} else {
+		config, err = tlsconfigstore.GetTLSConfigurationForClient(c.serverName, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy)
+		if err != nil {
+			grpclog.Infof("Failed to get client TLS config from S2Av2: %v", err)
+			if c.fallbackClientHandshake != nil {
+				return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err)
+			}
+			return nil, nil, err
+		}
+	}
+	if grpclog.V(1) {
+		grpclog.Infof("Got client TLS config from S2Av2.")
+	}
+	creds := credentials.NewTLS(config)
+
+	conn, authInfo, err := creds.ClientHandshake(ctx, serverName, rawConn)
+	if err != nil {
+		grpclog.Infof("Failed to do client handshake using S2Av2: %v", err)
+		if c.fallbackClientHandshake != nil {
+			return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err)
+		}
+		return nil, nil, err
+	}
+	grpclog.Infof("Successfully done client handshake using S2Av2 to: %s", serverName)
+
+	return conn, authInfo, err
+}
+
+// ServerHandshake performs a server-side mTLS handshake using the S2Av2.
+func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	if c.isClient {
+		return nil, nil, errors.New("server handshake called using client transport credentials")
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), GetS2ATimeout())
+	defer cancel()
+	s2AStream, err := createStream(ctx, c.s2av2Address, c.getS2AStream)
+	if err != nil {
+		grpclog.Infof("Failed to connect to S2Av2: %v", err)
+		return nil, nil, err
+	}
+	defer s2AStream.CloseSend()
+	if grpclog.V(1) {
+		grpclog.Infof("Connected to S2Av2.")
+	}
+
+	var tokenManager tokenmanager.AccessTokenManager
+	if c.tokenManager == nil {
+		tokenManager = nil
+	} else {
+		tokenManager = *c.tokenManager
+	}
+
+	config, err := tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode)
+	if err != nil {
+		grpclog.Infof("Failed to get server TLS config from S2Av2: %v", err)
+		return nil, nil, err
+	}
+	if grpclog.V(1) {
+		grpclog.Infof("Got server TLS config from S2Av2.")
+	}
+	creds := credentials.NewTLS(config)
+	return creds.ServerHandshake(rawConn)
+}
+
+// Info returns protocol info of s2av2TransportCreds.
+func (c *s2av2TransportCreds) Info() credentials.ProtocolInfo {
+	return *c.info
+}
+
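End to end, the credentials built above plug into gRPC like any other transport credentials. A hedged client-side sketch (note that this v2 package is internal; applications normally reach it through the public s2a package). The S2A address, verification mode, and dial target are placeholders, and nil is passed for the local identity, fallback handshake, stream getter, and authorization policy:

```go
creds, err := v2.NewClientCreds(
	"unix:///tmp/s2a.sock", // hypothetical S2Av2 address
	nil,                    // no explicit local identity
	s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE,
	nil, // no fallback handshake
	nil, // use the default S2A stream
	nil, // no server authorization policy
)
if err != nil {
	log.Fatalf("NewClientCreds: %v", err)
}
conn, err := grpc.Dial("service.example.com:443", grpc.WithTransportCredentials(creds))
if err != nil {
	log.Fatalf("Dial: %v", err)
}
defer conn.Close()
```

+// Clone makes a deep copy of s2av2TransportCreds.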
+func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials { + info := *c.info + serverName := c.serverName + fallbackClientHandshake := c.fallbackClientHandshake + + s2av2Address := c.s2av2Address + var tokenManager tokenmanager.AccessTokenManager + if c.tokenManager == nil { + tokenManager = nil + } else { + tokenManager = *c.tokenManager + } + verificationMode := c.verificationMode + var localIdentity *commonpbv1.Identity + if c.localIdentity != nil { + localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity) + } + var localIdentities []*commonpbv1.Identity + if c.localIdentities != nil { + localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities)) + for i, localIdentity := range c.localIdentities { + localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity) + } + } + creds := &s2av2TransportCreds{ + info: &info, + isClient: c.isClient, + serverName: serverName, + fallbackClientHandshake: fallbackClientHandshake, + s2av2Address: s2av2Address, + localIdentity: localIdentity, + localIdentities: localIdentities, + verificationMode: verificationMode, + } + if c.tokenManager == nil { + creds.tokenManager = nil + } else { + creds.tokenManager = &tokenManager + } + return creds +} + +// NewClientTLSConfig returns a tls.Config instance that uses S2Av2 to establish a TLS connection as +// a client. The tls.Config MUST only be used to establish a single TLS connection. +func NewClientTLSConfig( + ctx context.Context, + s2av2Address string, + tokenManager tokenmanager.AccessTokenManager, + verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, + serverName string, + serverAuthorizationPolicy []byte) (*tls.Config, error) { + s2AStream, err := createStream(ctx, s2av2Address, nil) + if err != nil { + grpclog.Infof("Failed to connect to S2Av2: %v", err) + return nil, err + } + + return tlsconfigstore.GetTLSConfigurationForClient(removeServerNamePort(serverName), s2AStream, tokenManager, nil, verificationMode, serverAuthorizationPolicy) +} + +// OverrideServerName sets the ServerName in the s2av2TransportCreds protocol +// info. The ServerName MUST be a hostname. +func (c *s2av2TransportCreds) OverrideServerName(serverNameOverride string) error { + serverName := removeServerNamePort(serverNameOverride) + c.info.ServerName = serverName + c.serverName = serverName + return nil +} + +// Remove the trailing port from server name. +func removeServerNamePort(serverName string) string { + name, _, err := net.SplitHostPort(serverName) + if err != nil { + name = serverName + } + return name +} + +type s2AGrpcStream struct { + stream s2av2pb.S2AService_SetUpSessionClient +} + +func (x s2AGrpcStream) Send(m *s2av2pb.SessionReq) error { + return x.stream.Send(m) +} + +func (x s2AGrpcStream) Recv() (*s2av2pb.SessionResp, error) { + return x.stream.Recv() +} + +func (x s2AGrpcStream) CloseSend() error { + return x.stream.CloseSend() +} + +func createStream(ctx context.Context, s2av2Address string, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) { + if getS2AStream != nil { + return getS2AStream(ctx, s2av2Address) + } + // TODO(rmehta19): Consider whether to close the connection to S2Av2. + conn, err := service.Dial(s2av2Address) + if err != nil { + return nil, err + } + client := s2av2pb.NewS2AServiceClient(conn) + gRPCStream, err := client.SetUpSession(ctx, []grpc.CallOption{}...) 
+ if err != nil { + return nil, err + } + return &s2AGrpcStream{ + stream: gRPCStream, + }, nil +} + +// GetS2ATimeout returns the timeout enforced on the connection to the S2A service for handshake. +func GetS2ATimeout() time.Duration { + timeout, err := time.ParseDuration(os.Getenv(s2aTimeoutEnv)) + if err != nil { + return defaultS2ATimeout + } + return timeout +} diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem new file mode 100644 index 00000000..493a5a26 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem new file mode 100644 index 00000000..55a7f10c --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq 
+udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem new file mode 100644 index 00000000..0f98322c --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl +AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem new file mode 100644 index 00000000..81afea78 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO +QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE 
+duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem new file mode 100644 index 00000000..493a5a26 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem new file mode 100644 index 00000000..55a7f10c --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL 
+Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q +BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem new file mode 100644 index 00000000..0f98322c --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl +AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem new file mode 100644 index 00000000..81afea78 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO 
+QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb +hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go new file mode 100644 index 00000000..4d919132 --- /dev/null +++ b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go @@ -0,0 +1,404 @@ +/* + * + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tlsconfigstore offloads operations to S2Av2. +package tlsconfigstore + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + + "github.com/google/s2a-go/internal/tokenmanager" + "github.com/google/s2a-go/internal/v2/certverifier" + "github.com/google/s2a-go/internal/v2/remotesigner" + "github.com/google/s2a-go/stream" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + + commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto" + commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +const ( + // HTTP/2 + h2 = "h2" +) + +// GetTLSConfigurationForClient returns a tls.Config instance for use by a client application. 
+func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) {
+ authMechanisms := getAuthMechanisms(tokenManager, []*commonpbv1.Identity{localIdentity})
+
+ if grpclog.V(1) {
+ grpclog.Infof("Sending request to S2Av2 for client TLS config.")
+ }
+ // Send request to S2Av2 for config.
+ if err := s2AStream.Send(&s2av2pb.SessionReq{
+ LocalIdentity: localIdentity,
+ AuthenticationMechanisms: authMechanisms,
+ ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{
+ GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{
+ ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_CLIENT,
+ },
+ },
+ }); err != nil {
+ grpclog.Infof("Failed to send request to S2Av2 for client TLS config")
+ return nil, err
+ }
+
+ // Get the response containing config from S2Av2.
+ resp, err := s2AStream.Recv()
+ if err != nil {
+ grpclog.Infof("Failed to receive client TLS config response from S2Av2.")
+ return nil, err
+ }
+
+ // TODO(rmehta19): Add unit test for this if statement.
+ if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) {
+ return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details)
+ }
+
+ // Extract TLS configuration from SessionResp.
+ tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration()
+
+ var cert tls.Certificate
+ for i, v := range tlsConfig.CertificateChain {
+ // Populate Certificates field.
+ block, _ := pem.Decode([]byte(v))
+ if block == nil {
+ return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty")
+ }
+ x509Cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+ cert.Certificate = append(cert.Certificate, x509Cert.Raw)
+ if i == 0 {
+ cert.Leaf = x509Cert
+ }
+ }
+
+ if len(tlsConfig.CertificateChain) > 0 {
+ cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream)
+ if cert.PrivateKey == nil {
+ return nil, errors.New("failed to retrieve Private Key from Remote Signer Library")
+ }
+ }
+
+ minVersion, maxVersion, err := getTLSMinMaxVersionsClient(tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ // Create mTLS credentials for client.
+ config := &tls.Config{
+ VerifyPeerCertificate: certverifier.VerifyServerCertificateChain(serverHostname, verificationMode, s2AStream, serverAuthorizationPolicy),
+ ServerName: serverHostname,
+ InsecureSkipVerify: true, // NOLINT
+ ClientSessionCache: nil,
+ SessionTicketsDisabled: true,
+ MinVersion: minVersion,
+ MaxVersion: maxVersion,
+ NextProtos: []string{h2},
+ }
+ if len(tlsConfig.CertificateChain) > 0 {
+ config.Certificates = []tls.Certificate{cert}
+ }
+ return config, nil
+}
+
+// GetTLSConfigurationForServer returns a tls.Config instance for use by a server application.
+func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) {
+ return &tls.Config{
+ GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream),
+ }, nil
+}
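Editor's note, not part of the vendored file: GetTLSConfigurationForServer defers all certificate selection to the GetConfigForClient callback, so a certificate is only committed to once the ClientHello's SNI is known. A minimal sketch of plugging the returned config into crypto/tls (ignoring that the package is internal to s2a-go; the s2AStream, tokenManager, localIdentities, and verificationMode values are assumed to be already established):

	cfg, err := tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, localIdentities, verificationMode)
	if err != nil {
		return err
	}
	// Certificate selection happens lazily, per connection, inside GetConfigForClient.
	ln, err := tls.Listen("tcp", ":8443", cfg)
	if err != nil {
		return err
	}
	defer ln.Close()

+
+// ClientConfig builds a TLS config for a server to establish a secure
+// connection with a client, based on SNI communicated during ClientHello.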
+// Ensures that server presents the correct certificate to establish a TLS +// connection. +func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) { + return func(chi *tls.ClientHelloInfo) (*tls.Config, error) { + tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream) + if err != nil { + return nil, err + } + + var cert tls.Certificate + for i, v := range tlsConfig.CertificateChain { + // Populate Certificates field. + block, _ := pem.Decode([]byte(v)) + if block == nil { + return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty") + } + x509Cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, err + } + cert.Certificate = append(cert.Certificate, x509Cert.Raw) + if i == 0 { + cert.Leaf = x509Cert + } + } + + cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream) + if cert.PrivateKey == nil { + return nil, errors.New("failed to retrieve Private Key from Remote Signer Library") + } + + minVersion, maxVersion, err := getTLSMinMaxVersionsServer(tlsConfig) + if err != nil { + return nil, err + } + + clientAuth := getTLSClientAuthType(tlsConfig) + + var cipherSuites []uint16 + cipherSuites = getCipherSuites(tlsConfig.Ciphersuites) + + // Create mTLS credentials for server. + return &tls.Config{ + Certificates: []tls.Certificate{cert}, + VerifyPeerCertificate: certverifier.VerifyClientCertificateChain(verificationMode, s2AStream), + ClientAuth: clientAuth, + CipherSuites: cipherSuites, + SessionTicketsDisabled: true, + MinVersion: minVersion, + MaxVersion: maxVersion, + NextProtos: []string{h2}, + }, nil + } +} + +func getCipherSuites(tlsConfigCipherSuites []commonpb.Ciphersuite) []uint16 { + var tlsGoCipherSuites []uint16 + for _, v := range tlsConfigCipherSuites { + s := getTLSCipherSuite(v) + if s != 0xffff { + tlsGoCipherSuites = append(tlsGoCipherSuites, s) + } + } + return tlsGoCipherSuites +} + +func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 { + switch tlsCipherSuite { + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + return tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: + return tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: + return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256: + return tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384: + return tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 + case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: + return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 + default: + return 0xffff + } +} + +func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) { + authMechanisms := getAuthMechanisms(tokenManager, localIdentities) + var locID *commonpbv1.Identity + if localIdentities != nil { + locID = localIdentities[0] + } + + if err := s2AStream.Send(&s2av2pb.SessionReq{ + LocalIdentity: locID, + AuthenticationMechanisms: 
authMechanisms, + ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{ + GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{ + ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_SERVER, + Sni: sni, + }, + }, + }); err != nil { + return nil, err + } + + resp, err := s2AStream.Recv() + if err != nil { + return nil, err + } + + // TODO(rmehta19): Add unit test for this if statement. + if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) { + return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details) + } + + return resp.GetGetTlsConfigurationResp().GetServerTlsConfiguration(), nil +} + +func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) tls.ClientAuthType { + var clientAuth tls.ClientAuthType + switch x := tlsConfig.RequestClientCertificate; x { + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE: + clientAuth = tls.NoClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: + clientAuth = tls.RequestClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY: + // This case actually maps to tls.VerifyClientCertIfGiven. However this + // mapping triggers normal verification, followed by custom verification, + // specified in VerifyPeerCertificate. To bypass normal verification, and + // only do custom verification we set clientAuth to RequireAnyClientCert or + // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full + // discussion. + clientAuth = tls.RequireAnyClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY: + clientAuth = tls.RequireAnyClientCert + case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY: + // This case actually maps to tls.RequireAndVerifyClientCert. However this + // mapping triggers normal verification, followed by custom verification, + // specified in VerifyPeerCertificate. To bypass normal verification, and + // only do custom verification we set clientAuth to RequireAnyClientCert or + // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full + // discussion. 
+ clientAuth = tls.RequireAnyClientCert + default: + clientAuth = tls.RequireAnyClientCert + } + return clientAuth +} + +func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism { + if tokenManager == nil { + return nil + } + if len(localIdentities) == 0 { + token, err := tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("Unable to get token for empty local identity: %v", err) + return nil + } + return []*s2av2pb.AuthenticationMechanism{ + { + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }, + } + } + var authMechanisms []*s2av2pb.AuthenticationMechanism + for _, localIdentity := range localIdentities { + if localIdentity == nil { + token, err := tokenManager.DefaultToken() + if err != nil { + grpclog.Infof("Unable to get default token for local identity %v: %v", localIdentity, err) + continue + } + authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ + Identity: localIdentity, + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }) + } else { + token, err := tokenManager.Token(localIdentity) + if err != nil { + grpclog.Infof("Unable to get token for local identity %v: %v", localIdentity, err) + continue + } + authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{ + Identity: localIdentity, + MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{ + Token: token, + }, + }) + } + } + return authMechanisms +} + +// TODO(rmehta19): refactor switch statements into a helper function. +func getTLSMinMaxVersionsClient(tlsConfig *s2av2pb.GetTlsConfigurationResp_ClientTlsConfiguration) (uint16, uint16, error) { + // Map S2Av2 TLSVersion to consts defined in tls package. + var minVersion uint16 + var maxVersion uint16 + switch x := tlsConfig.MinTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + minVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + minVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + minVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + minVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) + } + + switch x := tlsConfig.MaxTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + maxVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + maxVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + maxVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + maxVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) + } + if minVersion > maxVersion { + return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") + } + return minVersion, maxVersion, nil +} + +func getTLSMinMaxVersionsServer(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) (uint16, uint16, error) { + // Map S2Av2 TLSVersion to consts defined in tls package. 
+ var minVersion uint16 + var maxVersion uint16 + switch x := tlsConfig.MinTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + minVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + minVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + minVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + minVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x) + } + + switch x := tlsConfig.MaxTlsVersion; x { + case commonpb.TLSVersion_TLS_VERSION_1_0: + maxVersion = tls.VersionTLS10 + case commonpb.TLSVersion_TLS_VERSION_1_1: + maxVersion = tls.VersionTLS11 + case commonpb.TLSVersion_TLS_VERSION_1_2: + maxVersion = tls.VersionTLS12 + case commonpb.TLSVersion_TLS_VERSION_1_3: + maxVersion = tls.VersionTLS13 + default: + return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x) + } + if minVersion > maxVersion { + return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion") + } + return minVersion, maxVersion, nil +} diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go new file mode 100644 index 00000000..1c1349de --- /dev/null +++ b/vendor/github.com/google/s2a-go/s2a.go @@ -0,0 +1,412 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package s2a provides the S2A transport credentials used by a gRPC +// application. +package s2a + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "github.com/google/s2a-go/fallback" + "github.com/google/s2a-go/internal/handshaker" + "github.com/google/s2a-go/internal/handshaker/service" + "github.com/google/s2a-go/internal/tokenmanager" + "github.com/google/s2a-go/internal/v2" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto" +) + +const ( + s2aSecurityProtocol = "tls" + // defaultTimeout specifies the default server handshake timeout. + defaultTimeout = 30.0 * time.Second +) + +// s2aTransportCreds are the transport credentials required for establishing +// a secure connection using the S2A. They implement the +// credentials.TransportCredentials interface. +type s2aTransportCreds struct { + info *credentials.ProtocolInfo + minTLSVersion commonpb.TLSVersion + maxTLSVersion commonpb.TLSVersion + // tlsCiphersuites contains the ciphersuites used in the S2A connection. + // Note that these are currently unconfigurable. + tlsCiphersuites []commonpb.Ciphersuite + // localIdentity should only be used by the client. + localIdentity *commonpb.Identity + // localIdentities should only be used by the server. + localIdentities []*commonpb.Identity + // targetIdentities should only be used by the client. 
+ targetIdentities []*commonpb.Identity + isClient bool + s2aAddr string + ensureProcessSessionTickets *sync.WaitGroup +} + +// NewClientCreds returns a client-side transport credentials object that uses +// the S2A to establish a secure connection with a server. +func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, error) { + if opts == nil { + return nil, errors.New("nil client options") + } + var targetIdentities []*commonpb.Identity + for _, targetIdentity := range opts.TargetIdentities { + protoTargetIdentity, err := toProtoIdentity(targetIdentity) + if err != nil { + return nil, err + } + targetIdentities = append(targetIdentities, protoTargetIdentity) + } + localIdentity, err := toProtoIdentity(opts.LocalIdentity) + if err != nil { + return nil, err + } + if opts.EnableLegacyMode { + return &s2aTransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + minTLSVersion: commonpb.TLSVersion_TLS1_3, + maxTLSVersion: commonpb.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpb.Ciphersuite{ + commonpb.Ciphersuite_AES_128_GCM_SHA256, + commonpb.Ciphersuite_AES_256_GCM_SHA384, + commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + }, + localIdentity: localIdentity, + targetIdentities: targetIdentities, + isClient: true, + s2aAddr: opts.S2AAddress, + ensureProcessSessionTickets: opts.EnsureProcessSessionTickets, + }, nil + } + verificationMode := getVerificationMode(opts.VerificationMode) + var fallbackFunc fallback.ClientHandshake + if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil { + fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc + } + return v2.NewClientCreds(opts.S2AAddress, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy) +} + +// NewServerCreds returns a server-side transport credentials object that uses +// the S2A to establish a secure connection with a client. +func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, error) { + if opts == nil { + return nil, errors.New("nil server options") + } + var localIdentities []*commonpb.Identity + for _, localIdentity := range opts.LocalIdentities { + protoLocalIdentity, err := toProtoIdentity(localIdentity) + if err != nil { + return nil, err + } + localIdentities = append(localIdentities, protoLocalIdentity) + } + if opts.EnableLegacyMode { + return &s2aTransportCreds{ + info: &credentials.ProtocolInfo{ + SecurityProtocol: s2aSecurityProtocol, + }, + minTLSVersion: commonpb.TLSVersion_TLS1_3, + maxTLSVersion: commonpb.TLSVersion_TLS1_3, + tlsCiphersuites: []commonpb.Ciphersuite{ + commonpb.Ciphersuite_AES_128_GCM_SHA256, + commonpb.Ciphersuite_AES_256_GCM_SHA384, + commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256, + }, + localIdentities: localIdentities, + isClient: false, + s2aAddr: opts.S2AAddress, + }, nil + } + verificationMode := getVerificationMode(opts.VerificationMode) + return v2.NewServerCreds(opts.S2AAddress, localIdentities, verificationMode, opts.getS2AStream) +} + +// ClientHandshake initiates a client-side TLS handshake using the S2A. +func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if !c.isClient { + return nil, nil, errors.New("client handshake called using server transport credentials") + } + + // Connect to the S2A. 
+ hsConn, err := service.Dial(c.s2aAddr)
+ if err != nil {
+ grpclog.Infof("Failed to connect to S2A: %v", err)
+ return nil, nil, err
+ }
+
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithCancel(ctx)
+ defer cancel()
+
+ opts := &handshaker.ClientHandshakerOptions{
+ MinTLSVersion: c.minTLSVersion,
+ MaxTLSVersion: c.maxTLSVersion,
+ TLSCiphersuites: c.tlsCiphersuites,
+ TargetIdentities: c.targetIdentities,
+ LocalIdentity: c.localIdentity,
+ TargetName: serverAuthority,
+ EnsureProcessSessionTickets: c.ensureProcessSessionTickets,
+ }
+ chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts)
+ if err != nil {
+ grpclog.Infof("Call to handshaker.NewClientHandshaker failed: %v", err)
+ return nil, nil, err
+ }
+ defer func() {
+ if err != nil {
+ if closeErr := chs.Close(); closeErr != nil {
+ grpclog.Infof("Close failed unexpectedly: %v", closeErr)
+ err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr)
+ }
+ }
+ }()
+
+ secConn, authInfo, err := chs.ClientHandshake(context.Background())
+ if err != nil {
+ grpclog.Infof("Handshake failed: %v", err)
+ return nil, nil, err
+ }
+ return secConn, authInfo, nil
+}
+
+// ServerHandshake initiates a server-side TLS handshake using the S2A.
+func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+ if c.isClient {
+ return nil, nil, errors.New("server handshake called using client transport credentials")
+ }
+
+ // Connect to the S2A.
+ hsConn, err := service.Dial(c.s2aAddr)
+ if err != nil {
+ grpclog.Infof("Failed to connect to S2A: %v", err)
+ return nil, nil, err
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
+ defer cancel()
+
+ opts := &handshaker.ServerHandshakerOptions{
+ MinTLSVersion: c.minTLSVersion,
+ MaxTLSVersion: c.maxTLSVersion,
+ TLSCiphersuites: c.tlsCiphersuites,
+ LocalIdentities: c.localIdentities,
+ }
+ shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts)
+ if err != nil {
+ grpclog.Infof("Call to handshaker.NewServerHandshaker failed: %v", err)
+ return nil, nil, err
+ }
+ defer func() {
+ if err != nil {
+ if closeErr := shs.Close(); closeErr != nil {
+ grpclog.Infof("Close failed unexpectedly: %v", closeErr)
+ err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr)
+ }
+ }
+ }()
+
+ secConn, authInfo, err := shs.ServerHandshake(context.Background())
+ if err != nil {
+ grpclog.Infof("Handshake failed: %v", err)
+ return nil, nil, err
+ }
+ return secConn, authInfo, nil
+}
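Editor's note, not part of the vendored file: with ClientHandshake and ServerHandshake in place, s2aTransportCreds satisfies credentials.TransportCredentials, so the legacy path above and the S2Av2 path are wired into gRPC the same way. A minimal sketch, assuming a placeholder S2A address and with most error handling trimmed:

	// Client side: S2A-backed transport credentials for grpc.Dial.
	creds, err := s2a.NewClientCreds(&s2a.ClientOptions{S2AAddress: "unix:///tmp/s2a.sock"})
	if err != nil {
		log.Fatalf("NewClientCreds: %v", err)
	}
	conn, err := grpc.Dial("server.example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("Dial: %v", err)
	}
	defer conn.Close()

	// Server side: the mirror image, via NewServerCreds and grpc.Creds.
	serverCreds, err := s2a.NewServerCreds(&s2a.ServerOptions{S2AAddress: "unix:///tmp/s2a.sock"})
	if err != nil {
		log.Fatalf("NewServerCreds: %v", err)
	}
	srv := grpc.NewServer(grpc.Creds(serverCreds))
	// Register services on srv, then call srv.Serve(lis).

+
+func (c *s2aTransportCreds) Info() credentials.ProtocolInfo {
+ return *c.info
+}
+
+func (c *s2aTransportCreds) Clone() credentials.TransportCredentials {
+ info := *c.info
+ var localIdentity *commonpb.Identity
+ if c.localIdentity != nil {
+ localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity)
+ }
+ var localIdentities []*commonpb.Identity
+ if c.localIdentities != nil {
+ localIdentities = make([]*commonpb.Identity, len(c.localIdentities))
+ for i, localIdentity := range c.localIdentities {
+ localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity)
+ }
+ }
+ var targetIdentities []*commonpb.Identity
+ if c.targetIdentities != nil {
+ targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities))
+ for i, targetIdentity := range c.targetIdentities {
+ targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity)
+ }
+ }
+ return &s2aTransportCreds{
+ info: &info,
+ minTLSVersion: c.minTLSVersion,
+ maxTLSVersion: c.maxTLSVersion,
+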
tlsCiphersuites: c.tlsCiphersuites, + localIdentity: localIdentity, + localIdentities: localIdentities, + targetIdentities: targetIdentities, + isClient: c.isClient, + s2aAddr: c.s2aAddr, + } +} + +func (c *s2aTransportCreds) OverrideServerName(serverNameOverride string) error { + c.info.ServerName = serverNameOverride + return nil +} + +// TLSClientConfigOptions specifies parameters for creating client TLS config. +type TLSClientConfigOptions struct { + // ServerName is required by s2a as the expected name when verifying the hostname found in server's certificate. + // tlsConfig, _ := factory.Build(ctx, &s2a.TLSClientConfigOptions{ + // ServerName: "example.com", + // }) + ServerName string +} + +// TLSClientConfigFactory defines the interface for a client TLS config factory. +type TLSClientConfigFactory interface { + Build(ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) +} + +// NewTLSClientConfigFactory returns an instance of s2aTLSClientConfigFactory. +func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, error) { + if opts == nil { + return nil, fmt.Errorf("opts must be non-nil") + } + if opts.EnableLegacyMode { + return nil, fmt.Errorf("NewTLSClientConfigFactory only supports S2Av2") + } + tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager() + if err != nil { + // The only possible error is: access token not set in the environment, + // which is okay in environments other than serverless. + grpclog.Infof("Access token manager not initialized: %v", err) + return &s2aTLSClientConfigFactory{ + s2av2Address: opts.S2AAddress, + tokenManager: nil, + verificationMode: getVerificationMode(opts.VerificationMode), + serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + }, nil + } + return &s2aTLSClientConfigFactory{ + s2av2Address: opts.S2AAddress, + tokenManager: tokenManager, + verificationMode: getVerificationMode(opts.VerificationMode), + serverAuthorizationPolicy: opts.serverAuthorizationPolicy, + }, nil +} + +type s2aTLSClientConfigFactory struct { + s2av2Address string + tokenManager tokenmanager.AccessTokenManager + verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode + serverAuthorizationPolicy []byte +} + +func (f *s2aTLSClientConfigFactory) Build( + ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) { + serverName := "" + if opts != nil && opts.ServerName != "" { + serverName = opts.ServerName + } + return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy) +} + +func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode { + switch verificationMode { + case ConnectToGoogle: + return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE + case Spiffe: + return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE + default: + return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED + } +} + +// NewS2ADialTLSContextFunc returns a dialer which establishes an MTLS connection using S2A. 
+// Example use with http.RoundTripper:
+//
+// dialTLSContext := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{
+// S2AAddress: s2aAddress, // required
+// })
+// transport := http.DefaultTransport
+// transport.DialTLSContext = dialTLSContext
+func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, network, addr string) (net.Conn, error) {
+
+ return func(ctx context.Context, network, addr string) (net.Conn, error) {
+
+ fallback := func(err error) (net.Conn, error) {
+ if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackDialer != nil &&
+ opts.FallbackOpts.FallbackDialer.Dialer != nil && opts.FallbackOpts.FallbackDialer.ServerAddr != "" {
+ fbDialer := opts.FallbackOpts.FallbackDialer
+ grpclog.Infof("fall back to dial: %s", fbDialer.ServerAddr)
+ fbConn, fbErr := fbDialer.Dialer.DialContext(ctx, network, fbDialer.ServerAddr)
+ if fbErr != nil {
+ return nil, fmt.Errorf("error fallback to %s: %v; S2A error: %w", fbDialer.ServerAddr, fbErr, err)
+ }
+ return fbConn, nil
+ }
+ return nil, err
+ }
+
+ factory, err := NewTLSClientConfigFactory(opts)
+ if err != nil {
+ grpclog.Infof("error creating S2A client config factory: %v", err)
+ return fallback(err)
+ }
+
+ serverName, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ serverName = addr
+ }
+ timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout())
+ defer cancel()
+ s2aTLSConfig, err := factory.Build(timeoutCtx, &TLSClientConfigOptions{
+ ServerName: serverName,
+ })
+ if err != nil {
+ grpclog.Infof("error building S2A TLS config: %v", err)
+ return fallback(err)
+ }
+
+ s2aDialer := &tls.Dialer{
+ Config: s2aTLSConfig,
+ }
+ c, err := s2aDialer.DialContext(ctx, network, addr)
+ if err != nil {
+ grpclog.Infof("error dialing with S2A to %s: %v", addr, err)
+ return fallback(err)
+ }
+ grpclog.Infof("success dialing MTLS to %s with S2A", addr)
+ return c, nil
+ }
+}
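Editor's note, not part of the vendored file: the sketch in the doc comment above does not compile verbatim, because http.DefaultTransport has the interface type http.RoundTripper. A compilable variant, with a placeholder S2A address:

	dialTLSContext := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{
		S2AAddress: "unix:///tmp/s2a.sock", // placeholder
	})
	transport := http.DefaultTransport.(*http.Transport).Clone()
	transport.DialTLSContext = dialTLSContext
	httpClient := &http.Client{Transport: transport}
	_ = httpClient // issue requests as usual; TLS is dialed through S2A

diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go
new file mode 100644
index 00000000..94feafb9
--- /dev/null
+++ b/vendor/github.com/google/s2a-go/s2a_options.go
@@ -0,0 +1,208 @@
+/*
+ *
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package s2a
+
+import (
+ "context"
+ "crypto/tls"
+ "errors"
+ "sync"
+
+ "github.com/google/s2a-go/fallback"
+ "github.com/google/s2a-go/stream"
+
+ s2apb "github.com/google/s2a-go/internal/proto/common_go_proto"
+)
+
+// Identity is the interface for S2A identities.
+type Identity interface {
+ // Name returns the name of the identity.
+ Name() string
+}
+
+type spiffeID struct {
+ spiffeID string
+}
+
+func (s *spiffeID) Name() string { return s.spiffeID }
+
+// NewSpiffeID creates a SPIFFE ID from id.
+func NewSpiffeID(id string) Identity {
+ return &spiffeID{spiffeID: id}
+}
+
+type hostname struct {
+ hostname string
+}
+
+func (h *hostname) Name() string { return h.hostname }
+
+// NewHostname creates a hostname from name.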
+func NewHostname(name string) Identity {
+ return &hostname{hostname: name}
+}
+
+type uid struct {
+ uid string
+}
+
+func (h *uid) Name() string { return h.uid }
+
+// NewUID creates a UID from name.
+func NewUID(name string) Identity {
+ return &uid{uid: name}
+}
+
+// VerificationModeType specifies the mode that S2A must use to verify the peer
+// certificate chain.
+type VerificationModeType int
+
+// Three types of verification modes.
+const (
+ Unspecified = iota
+ ConnectToGoogle
+ Spiffe
+)
+
+// ClientOptions contains the client-side options used to establish a secure
+// channel using the S2A handshaker service.
+type ClientOptions struct {
+ // TargetIdentities contains a list of allowed server identities. One of the
+ // target identities should match the peer identity in the handshake
+ // result; otherwise, the handshake fails.
+ TargetIdentities []Identity
+ // LocalIdentity is the local identity of the client application. If none is
+ // provided, then the S2A will choose the default identity, if one exists.
+ LocalIdentity Identity
+ // S2AAddress is the address of the S2A.
+ S2AAddress string
+ // EnsureProcessSessionTickets waits for all session tickets to be sent to
+ // S2A before a process completes.
+ //
+ // This functionality is crucial for processes that complete very soon after
+ // using S2A to establish a TLS connection, but it can be ignored for longer
+ // lived processes.
+ //
+ // Usage example:
+ // func main() {
+ // var ensureProcessSessionTickets sync.WaitGroup
+ // clientOpts := &s2a.ClientOptions{
+ // EnsureProcessSessionTickets: &ensureProcessSessionTickets,
+ // // Set other members.
+ // }
+ // creds, _ := s2a.NewClientCreds(clientOpts)
+ // conn, _ := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds))
+ // defer conn.Close()
+ //
+ // // Make RPC call.
+ //
+ // // The process terminates right after the RPC call ends.
+ // // ensureProcessSessionTickets can be used to ensure resumption
+ // // tickets are fully processed. If the process is long-lived, using
+ // // ensureProcessSessionTickets is not necessary.
+ // ensureProcessSessionTickets.Wait()
+ // }
+ EnsureProcessSessionTickets *sync.WaitGroup
+ // If true, enables the use of legacy S2Av1.
+ EnableLegacyMode bool
+ // VerificationMode specifies the mode that S2A must use to verify the
+ // peer certificate chain.
+ VerificationMode VerificationModeType
+
+ // Optional fallback after dialing with S2A fails.
+ FallbackOpts *FallbackOptions
+
+ // Generates an S2AStream interface for talking to the S2A server.
+ getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)
+
+ // Serialized user specified policy for server authorization.
+ serverAuthorizationPolicy []byte
+}
+
+// FallbackOptions prescribes the fallback logic that should be taken if the application fails to connect with S2A.
+type FallbackOptions struct {
+ // FallbackClientHandshakeFunc is used to specify fallback behavior when calling s2a.NewClientCreds().
+ // It will be called by the ClientHandshake function, after the handshake with S2A fails.
+ // s2a.NewClientCreds() ignores the other FallbackDialer field.
+ FallbackClientHandshakeFunc fallback.ClientHandshake
+
+ // FallbackDialer is used to specify fallback behavior when calling s2a.NewS2ADialTLSContextFunc().
+ // It passes in a custom fallback dialer and server address to use after dialing with S2A fails.
+ // s2a.NewS2ADialTLSContextFunc() ignores the other FallbackClientHandshakeFunc field.
+ FallbackDialer *FallbackDialer +} + +// FallbackDialer contains a fallback tls.Dialer and a server address to connect to. +type FallbackDialer struct { + // Dialer specifies a fallback tls.Dialer. + Dialer *tls.Dialer + // ServerAddr is used by Dialer to establish fallback connection. + ServerAddr string +} + +// DefaultClientOptions returns the default client options. +func DefaultClientOptions(s2aAddress string) *ClientOptions { + return &ClientOptions{ + S2AAddress: s2aAddress, + VerificationMode: ConnectToGoogle, + } +} + +// ServerOptions contains the server-side options used to establish a secure +// channel using the S2A handshaker service. +type ServerOptions struct { + // LocalIdentities is the list of local identities that may be assumed by + // the server. If no local identity is specified, then the S2A chooses a + // default local identity, if one exists. + LocalIdentities []Identity + // S2AAddress is the address of the S2A. + S2AAddress string + // If true, enables the use of legacy S2Av1. + EnableLegacyMode bool + // VerificationMode specifies the mode that S2A must use to verify the + // peer certificate chain. + VerificationMode VerificationModeType + + // Generates an S2AStream interface for talking to the S2A server. + getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error) +} + +// DefaultServerOptions returns the default server options. +func DefaultServerOptions(s2aAddress string) *ServerOptions { + return &ServerOptions{ + S2AAddress: s2aAddress, + VerificationMode: ConnectToGoogle, + } +} + +func toProtoIdentity(identity Identity) (*s2apb.Identity, error) { + if identity == nil { + return nil, nil + } + switch id := identity.(type) { + case *spiffeID: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil + case *hostname: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil + case *uid: + return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil + default: + return nil, errors.New("unrecognized identity type") + } +} diff --git a/vendor/github.com/google/s2a-go/s2a_utils.go b/vendor/github.com/google/s2a-go/s2a_utils.go new file mode 100644 index 00000000..d649cc46 --- /dev/null +++ b/vendor/github.com/google/s2a-go/s2a_utils.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2021 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package s2a + +import ( + "context" + "errors" + + commonpb "github.com/google/s2a-go/internal/proto/common_go_proto" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/peer" +) + +// AuthInfo exposes security information from the S2A to the application. +type AuthInfo interface { + // AuthType returns the authentication type. + AuthType() string + // ApplicationProtocol returns the application protocol, e.g. "grpc". + ApplicationProtocol() string + // TLSVersion returns the TLS version negotiated during the handshake. 
+ TLSVersion() commonpb.TLSVersion
+ // Ciphersuite returns the ciphersuite negotiated during the handshake.
+ Ciphersuite() commonpb.Ciphersuite
+ // PeerIdentity returns the authenticated identity of the peer.
+ PeerIdentity() *commonpb.Identity
+ // LocalIdentity returns the local identity of the application used during
+ // session setup.
+ LocalIdentity() *commonpb.Identity
+ // PeerCertFingerprint returns the SHA256 hash of the peer certificate used in
+ // the S2A handshake.
+ PeerCertFingerprint() []byte
+ // LocalCertFingerprint returns the SHA256 hash of the local certificate used
+ // in the S2A handshake.
+ LocalCertFingerprint() []byte
+ // IsHandshakeResumed returns true if a cached session was used to resume
+ // the handshake.
+ IsHandshakeResumed() bool
+ // SecurityLevel returns the security level of the connection.
+ SecurityLevel() credentials.SecurityLevel
+}
+
+// AuthInfoFromPeer extracts the authinfo.S2AAuthInfo object from the given
+// peer, if it exists. This API should be used by gRPC clients after
+// obtaining a peer object using the grpc.Peer() CallOption.
+func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) {
+ s2aAuthInfo, ok := p.AuthInfo.(AuthInfo)
+ if !ok {
+ return nil, errors.New("no S2AAuthInfo found in Peer")
+ }
+ return s2aAuthInfo, nil
+}
+
+// AuthInfoFromContext extracts the authinfo.S2AAuthInfo object from the given
+// context, if it exists. This API should be used by gRPC server RPC handlers
+// to get information about the peer. On the client-side, use the grpc.Peer()
+// CallOption and the AuthInfoFromPeer function.
+func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) {
+ p, ok := peer.FromContext(ctx)
+ if !ok {
+ return nil, errors.New("no Peer found in Context")
+ }
+ return AuthInfoFromPeer(p)
+}
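Editor's note, not part of the vendored file: AuthInfoFromContext is the hook a gRPC server handler uses to inspect the negotiated S2A session. A brief sketch; the echoServer and pb types are hypothetical:

	func (s *echoServer) Echo(ctx context.Context, req *pb.EchoRequest) (*pb.EchoResponse, error) {
		// Fails if the peer did not connect through S2A-backed credentials.
		authInfo, err := s2a.AuthInfoFromContext(ctx)
		if err != nil {
			return nil, err
		}
		grpclog.Infof("peer identity: %v, TLS version: %v", authInfo.PeerIdentity(), authInfo.TLSVersion())
		return &pb.EchoResponse{Text: req.GetText()}, nil
	}

diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go
new file mode 100644
index 00000000..584bf32b
--- /dev/null
+++ b/vendor/github.com/google/s2a-go/stream/s2a_stream.go
@@ -0,0 +1,34 @@
+/*
+ *
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stream provides an interface for bidirectional streaming to the S2A server.
+package stream
+
+import (
+ s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
+)
+
+// S2AStream defines the operation for communicating with the S2A server over a bidirectional stream.
+type S2AStream interface {
+ // Send sends the message to the S2A server.
+ Send(*s2av2pb.SessionReq) error
+ // Recv receives the message from the S2A server.
+ Recv() (*s2av2pb.SessionResp, error)
+ // CloseSend closes the channel to the S2A server.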
+ CloseSend() error +} diff --git a/vendor/github.com/google/s2a-go/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/testdata/client_cert.pem new file mode 100644 index 00000000..493a5a26 --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/client_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9 +a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0 +OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3 +RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK +P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316 +HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu +0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6 +EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9 +/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA +QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ +nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD +X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco +pKklVz0= +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/client_key.pem b/vendor/github.com/google/s2a-go/testdata/client_key.pem new file mode 100644 index 00000000..55a7f10c --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/client_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF +l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj ++Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G +4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA +xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh +68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ +/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL +Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA +VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9 +9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH +MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt +aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq +xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx +2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv +EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z +aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq +udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs +VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm +56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT +GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V +Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm +HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q 
+BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH +qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh +GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/google/s2a-go/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/testdata/server_cert.pem new file mode 100644 index 00000000..0f98322c --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/server_cert.pem @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL +BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2 +YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE +AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN +MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ +BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx +ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ +KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT +fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ +qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE +xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es +Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2 +Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM +ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR +e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X +POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl +AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg +odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+ +PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN +Dhm6uZM= +-----END CERTIFICATE----- diff --git a/vendor/github.com/google/s2a-go/testdata/server_key.pem b/vendor/github.com/google/s2a-go/testdata/server_key.pem new file mode 100644 index 00000000..81afea78 --- /dev/null +++ b/vendor/github.com/google/s2a-go/testdata/server_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs +8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO +QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk +XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA +Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc +gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf +LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl +jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0 +4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q +Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P +nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1 +drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE +duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50 +L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG +06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm +eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD +uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7 +lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL +a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb 
+hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ +7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j +r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7 +eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD +B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz +7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g== +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go index 0ba9da7d..b3283b81 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -1,6 +1,15 @@ // Copyright 2022 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). // @@ -13,6 +22,7 @@ import ( "crypto/rsa" "crypto/x509" "encoding/gob" + "errors" "fmt" "io" "net/rpc" @@ -72,9 +82,9 @@ func (k *Key) Close() error { if err := k.cmd.Process.Kill(); err != nil { return fmt.Errorf("failed to kill signer process: %w", err) } - if err := k.cmd.Wait(); err.Error() != "signal: killed" { - return fmt.Errorf("signer process was not killed: %w", err) - } + // Wait for cmd to exit and release resources. Since the process is forcefully killed, this + // will return a non-nil error (varies by OS), which we will ignore. + _ = k.cmd.Wait() // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. if err := k.client.Close(); err.Error() != "close |0: file already closed" { @@ -97,6 +107,10 @@ func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed [ return } +// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable, +// possibly due to missing config or missing binary path. +var ErrCredUnavailable = errors.New("Cred is unavailable") + // Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate // related operations, including signing messages with the private key. 
 //
@@ -110,6 +124,9 @@ func Cred(configFilePath string) (*Key, error) {
 }
 enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath)
 if err != nil {
+ if errors.Is(err, util.ErrConfigUnavailable) {
+ return nil, ErrCredUnavailable
+ }
 return nil, err
 }
 k := &Key{
diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
index ccef5278..1640ec1c 100644
--- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
+++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
@@ -1,10 +1,23 @@
+// Copyright 2022 Google LLC.
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
 // Package util provides helper functions for the client.
 package util
 
 import (
 "encoding/json"
 "errors"
- "io/ioutil"
+ "io"
 "os"
 "os/user"
 "path/filepath"
@@ -23,14 +36,21 @@ type Libs struct {
 ECP string `json:"ecp"`
 }
 
+// ErrConfigUnavailable is a sentinel error that indicates ECP config is unavailable,
+// possibly because the config file is missing or it lacks a binary path.
+var ErrConfigUnavailable = errors.New("Config is unavailable")
+
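Editor's note, not part of the vendored files: the sentinel pair ErrCredUnavailable/ErrConfigUnavailable lets callers treat "ECP is simply not configured on this machine" as a soft failure instead of matching error strings. A hedged sketch of the intended call-site pattern; defaultTLSCredentials is a hypothetical fallback helper:

	key, err := client.Cred(configFilePath)
	if errors.Is(err, client.ErrCredUnavailable) {
		// No ECP config present; fall back to ordinary TLS credentials.
		return defaultTLSCredentials()
	} else if err != nil {
		return nil, err
	}
	// key implements crypto.Signer, backed by the signer binary.

 // LoadSignerBinaryPath retrieves the path of the signer binary from the config file.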
func LoadSignerBinaryPath(configFilePath string) (path string, err error) { jsonFile, err := os.Open(configFilePath) if err != nil { + if errors.Is(err, os.ErrNotExist) { + return "", ErrConfigUnavailable + } return "", err } - byteValue, err := ioutil.ReadAll(jsonFile) + byteValue, err := io.ReadAll(jsonFile) if err != nil { return "", err } @@ -41,7 +61,7 @@ func LoadSignerBinaryPath(configFilePath string) (path string, err error) { } signerBinaryPath := config.Libs.ECP if signerBinaryPath == "" { - return "", errors.New("signer binary path is missing") + return "", ErrConfigUnavailable } return signerBinaryPath, nil } diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index d88960b7..91d60a80 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.7.0" + "v2": "2.11.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index b75170f2..e17b196f 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,57 @@ # Changelog +## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) + + +### Features + +* **v2:** add GoVersion package variable ([#283](https://github.com/googleapis/gax-go/issues/283)) ([26553cc](https://github.com/googleapis/gax-go/commit/26553ccadb4016b189881f52e6c253b68bb3e3d5)) + + +### Bug Fixes + +* **v2:** handle space in non-devel go version ([#288](https://github.com/googleapis/gax-go/issues/288)) ([fd7bca0](https://github.com/googleapis/gax-go/commit/fd7bca029a1c5e63def8f0a5fd1ec3f725d92f75)) + +## [2.10.0](https://github.com/googleapis/gax-go/compare/v2.9.1...v2.10.0) (2023-05-30) + + +### Features + +* update dependencies ([#280](https://github.com/googleapis/gax-go/issues/280)) ([4514281](https://github.com/googleapis/gax-go/commit/4514281058590f3637c36bfd49baa65c4d3cfb21)) + +## [2.9.1](https://github.com/googleapis/gax-go/compare/v2.9.0...v2.9.1) (2023-05-23) + + +### Bug Fixes + +* **v2:** drop cloud lro test dep ([#276](https://github.com/googleapis/gax-go/issues/276)) ([c67eeba](https://github.com/googleapis/gax-go/commit/c67eeba0f10a3294b1d93c1b8fbe40211a55ae5f)), refs [#270](https://github.com/googleapis/gax-go/issues/270) + +## [2.9.0](https://github.com/googleapis/gax-go/compare/v2.8.0...v2.9.0) (2023-05-22) + + +### Features + +* **apierror:** add method to return HTTP status code conditionally ([#274](https://github.com/googleapis/gax-go/issues/274)) ([5874431](https://github.com/googleapis/gax-go/commit/587443169acd10f7f86d1989dc8aaf189e645e98)), refs [#229](https://github.com/googleapis/gax-go/issues/229) + + +### Documentation + +* add ref to usage with clients ([#272](https://github.com/googleapis/gax-go/issues/272)) ([ea4d72d](https://github.com/googleapis/gax-go/commit/ea4d72d514beba4de450868b5fb028601a29164e)), refs [#228](https://github.com/googleapis/gax-go/issues/228) + +## [2.8.0](https://github.com/googleapis/gax-go/compare/v2.7.1...v2.8.0) (2023-03-15) + + +### Features + +* **v2:** add WithTimeout option ([#259](https://github.com/googleapis/gax-go/issues/259)) ([9a8da43](https://github.com/googleapis/gax-go/commit/9a8da43693002448b1e8758023699387481866d1)) + +## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) + + 
+### Bug Fixes + +* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254) + ## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index aa6be130..d785a065 100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -29,6 +29,10 @@ // Package apierror implements a wrapper error for parsing error details from // API calls. Both HTTP & gRPC status errors are supported. +// +// For examples of how to use [APIError] with client libraries please reference +// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors) +// in the client library documentation. package apierror import ( @@ -39,6 +43,7 @@ import ( jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto" "google.golang.org/api/googleapi" "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/proto" @@ -197,12 +202,12 @@ func (a *APIError) Unwrap() error { // Error returns a readable representation of the APIError. func (a *APIError) Error() string { var msg string - if a.status != nil { - msg = a.err.Error() - } else if a.httpErr != nil { + if a.httpErr != nil { // Truncate the googleapi.Error message because it dumps the Details in // an ugly way. msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) + } else if a.status != nil { + msg = a.err.Error() } return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) } @@ -236,6 +241,9 @@ func (a *APIError) Metadata() map[string]string { // setDetailsFromError parses a Status error or a googleapi.Error // and sets status and details or httpErr and details, respectively. // It returns false if neither Status nor googleapi.Error can be parsed. +// When err is a googleapi.Error, the status of the returned error will +// be set to an Unknown error, rather than nil, since a nil code is +// interpreted as OK in the gRPC status package. func (a *APIError) setDetailsFromError(err error) bool { st, isStatus := status.FromError(err) var herr *googleapi.Error @@ -248,6 +256,7 @@ func (a *APIError) setDetailsFromError(err error) bool { case isHTTPErr: a.httpErr = herr a.details = parseHTTPDetails(herr) + a.status = status.New(codes.Unknown, herr.Message) default: return false } @@ -340,3 +349,13 @@ func parseHTTPDetails(gae *googleapi.Error) ErrDetails { return parseDetails(details) } + +// HTTPCode returns the underlying HTTP response status code. This method returns +// `-1` if the underlying error is a [google.golang.org/grpc/status.Status]. To +// check gRPC error codes use [google.golang.org/grpc/status.Code]. 
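The `HTTPCode` accessor documented above (its body opens the next hunk) pairs with the new `codes.Unknown` fallback in `setDetailsFromError`. A small usage sketch, assuming only the public `apierror.FromError`, `GRPCStatus`, and `googleapi.Error` APIs that already exist in these modules:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/googleapis/gax-go/v2/apierror"
	"google.golang.org/api/googleapi"
)

func main() {
	// Simulate an HTTP-sourced error as a REST transport would return it.
	src := &googleapi.Error{Code: http.StatusNotFound, Message: "resource missing"}

	apiErr, ok := apierror.FromError(src)
	if !ok {
		panic("expected error to convert to *apierror.APIError")
	}

	// HTTPCode reports the HTTP status; it returns -1 for gRPC-sourced
	// errors, where status.Code should be used instead.
	fmt.Println("HTTP status:", apiErr.HTTPCode())

	// With this change, GRPCStatus() for an HTTP-sourced error carries
	// codes.Unknown rather than a nil status (which gRPC reads as OK).
	fmt.Println("gRPC code:", apiErr.GRPCStatus().Code())
}
```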
+func (a *APIError) HTTPCode() int { + if a.httpErr == nil { + return -1 + } + return a.httpErr.Code +} diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go index e0920055..c52e03f6 100644 --- a/vendor/github.com/googleapis/gax-go/v2/call_option.go +++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -218,6 +218,14 @@ func (p pathOpt) Resolve(s *CallSettings) { s.Path = p.p } +type timeoutOpt struct { + t time.Duration +} + +func (t timeoutOpt) Resolve(s *CallSettings) { + s.timeout = t.t +} + // WithPath applies a Path override to the HTTP-based APICall. // // This is for internal use only. @@ -230,6 +238,15 @@ func WithGRPCOptions(opt ...grpc.CallOption) CallOption { return grpcOpt(append([]grpc.CallOption(nil), opt...)) } +// WithTimeout is a convenience option for setting a context.WithTimeout on the +// singular context.Context used for **all** APICall attempts. Calculated from +// the start of the first APICall attempt. +// If the context.Context provided to Invoke already has a Deadline set, that +// will always be respected over the deadline calculated using this option. +func WithTimeout(t time.Duration) CallOption { + return &timeoutOpt{t: t} +} + // CallSettings allow fine-grained control over how calls are made. type CallSettings struct { // Retry returns a Retryer to be used to control retry logic of a method call. @@ -241,4 +258,8 @@ type CallSettings struct { // Path is an HTTP override for an APICall. Path string + + // Timeout defines the amount of time that Invoke has to complete. + // Unexported so it cannot be changed by the code in an APICall. + timeout time.Duration } diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go index 139371a0..6488461f 100644 --- a/vendor/github.com/googleapis/gax-go/v2/header.go +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -29,7 +29,73 @@ package gax -import "bytes" +import ( + "bytes" + "runtime" + "strings" + "unicode" +) + +var ( + // GoVersion is a header-safe representation of the current runtime + // environment's Go version. This is for GAX consumers that need to + // report the Go runtime version in API calls. + GoVersion string + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +func init() { + GoVersion = goVersion() +} + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. 
+ if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} // XGoogHeader is for use by the Google Cloud Libraries only. // diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index 0ba5da1d..374dcdb1 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. -const Version = "2.7.0" +const Version = "2.11.0" diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go index 9fcc2995..721d1af5 100644 --- a/vendor/github.com/googleapis/gax-go/v2/invoke.go +++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -68,6 +68,16 @@ type sleeper func(ctx context.Context, d time.Duration) error // invoke implements Invoke, taking an additional sleeper argument for testing. func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { var retryer Retryer + + // Only use the value provided via WithTimeout if the context doesn't + // already have a deadline. This is important for backwards compatibility if + // the user already set a deadline on the context given to Invoke. + if _, ok := ctx.Deadline(); !ok && settings.timeout != 0 { + c, cc := context.WithTimeout(ctx, settings.timeout) + defer cc() + ctx = c + } + for { err := call(ctx, settings) if err == nil { diff --git a/vendor/github.com/klauspost/compress/.gitattributes b/vendor/github.com/klauspost/compress/.gitattributes new file mode 100644 index 00000000..40243359 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitattributes @@ -0,0 +1,2 @@ +* -text +*.bin -text -diff diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore new file mode 100644 index 00000000..d31b3781 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +/s2/cmd/_s2sx/sfx-exe + +# Linux perf files +perf.data +perf.data.old + +# gdb history +.gdb_history diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml new file mode 100644 index 00000000..0af08e65 --- /dev/null +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -0,0 +1,141 @@ +# This is an example goreleaser.yaml file with some sane defaults. 
+# Make sure to check the documentation at http://goreleaser.com +before: + hooks: + - ./gen.sh + - go install mvdan.cc/garble@latest + +builds: + - + id: "s2c" + binary: s2c + main: ./s2/cmd/s2c/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2d" + binary: s2d + main: ./s2/cmd/s2d/main.go + flags: + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + - + id: "s2sx" + binary: s2sx + main: ./s2/cmd/_s2sx/main.go + flags: + - -modfile=s2sx.mod + - -trimpath + env: + - CGO_ENABLED=0 + goos: + - aix + - linux + - freebsd + - netbsd + - windows + - darwin + goarch: + - 386 + - amd64 + - arm + - arm64 + - ppc64 + - ppc64le + - mips64 + - mips64le + goarm: + - 7 + gobinary: garble + +archives: + - + id: s2-binaries + name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}" + replacements: + aix: AIX + darwin: OSX + linux: Linux + windows: Windows + 386: i386 + amd64: x86_64 + freebsd: FreeBSD + netbsd: NetBSD + format_overrides: + - goos: windows + format: zip + files: + - unpack/* + - s2/LICENSE + - s2/README.md +checksum: + name_template: 'checksums.txt' +snapshot: + name_template: "{{ .Tag }}-next" +changelog: + sort: asc + filters: + exclude: + - '^doc:' + - '^docs:' + - '^test:' + - '^tests:' + - '^Update\sREADME.md' + +nfpms: + - + file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}" + vendor: Klaus Post + homepage: https://github.com/klauspost/compress + maintainer: Klaus Post + description: S2 Compression Tool + license: BSD 3-Clause + formats: + - deb + - rpm + replacements: + darwin: Darwin + linux: Linux + freebsd: FreeBSD + amd64: x86_64 diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE new file mode 100644 index 00000000..87d55747 --- /dev/null +++ b/vendor/github.com/klauspost/compress/LICENSE @@ -0,0 +1,304 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------ + +Files: gzhttp/* + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2017 The New York Times Company + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-----------------
+
+Files: s2/cmd/internal/filepathx/*
+
+Copyright 2016 The filepathx Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
new file mode 100644
index 00000000..3c00c1af
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -0,0 +1,578 @@
+# compress
+
+This package provides various compression algorithms.
+
+* [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression in pure Go.
+* [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) is a high performance replacement for Snappy.
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+* [snappy](https://github.com/klauspost/compress/tree/master/snappy) is a drop-in replacement for `github.com/golang/snappy` offering better compression and concurrent streams.
+* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
+* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) provides client and server wrappers for handling gzipped requests efficiently.
+* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
+* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
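Since the deflate/gzip packages above mirror the standard library API, adoption is an import swap. A minimal round-trip sketch using only standard `compress/gzip`-style calls:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	// Drop-in for compress/gzip: same API, faster implementation.
	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer

	// Compress exactly as with the standard library.
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("hello, compressed world")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	compressedSize := buf.Len()

	// And decompress.
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()

	out, err := io.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d compressed bytes -> %q\n", compressedSize, out)
}
```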
+
+[![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
+[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
+
+# changelog
+
+* Sept 16, 2022 (v1.15.10)
+
+	* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+	* Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
+	* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+	* zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
+	* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+	* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
+	* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
+	* Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
+
+* July 21, 2022 (v1.15.9)
+
+	* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
+	* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
+	* zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+
+* July 13, 2022 (v1.15.8)
+
+	* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
+	* s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638
+	* zstd: Optimize seqdec amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636
+	* zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637
+	* huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634
+	* zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640
+	* gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639
+
+* June 29, 2022 (v1.15.7)
+
+	* s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
+	* zip: Merge upstream https://github.com/klauspost/compress/pull/631
+	* zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
+	* zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
+	* flate: Faster histograms https://github.com/klauspost/compress/pull/620
+	* deflate: Use compound hcode https://github.com/klauspost/compress/pull/622
+
+* June 3, 2022 (v1.15.6)
+	* s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
+	* s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
+	* zstd: Always use configured block size https://github.com/klauspost/compress/pull/605
+	* zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606
+	* zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608
+	* gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612
+	* s2: Add ReaderIgnoreCRC
https://github.com/klauspost/compress/pull/609 + * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 + * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 + * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 + +* May 25, 2022 (v1.15.5) + * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 + * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 + * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 + * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 + * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 + * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 + * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 + * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 + * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + + +* May 11, 2022 (v1.15.4) + * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) + * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) + * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) + * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) + +* May 5, 2022 (v1.15.3) + * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) + * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) + +* Apr 26, 2022 (v1.15.2) + * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) + * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) + * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) + * Minimum version is Go 1.16, added CI test on 1.18. 
+
+* Mar 11, 2022 (v1.15.1)
+	* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
+	* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
+	* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
+	* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
+	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
+
+* Mar 3, 2022 (v1.15.0)
+	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+	* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+	* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Asynchronous stream decompression is now also faster, since the goroutine allocation splits the workload much more effectively. On typical streams this will fully use 2 cores for decompression. When a stream has finished decoding, no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+
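For reference, the "synchronous" behaviour described above is selected through the existing concurrency options; nothing new is needed beyond setting them to 1. A minimal sketch using the documented `EncodeAll`/`DecodeAll` round trip (a nil reader/writer is allowed for this stateless use):

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Concurrency 1 selects the synchronous paths: no goroutines spawned.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	compressed := enc.EncodeAll([]byte("synchronous zstd round trip"), nil)

	dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(1))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d -> %d bytes: %q\n", len(compressed), len(plain), plain)
}
```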
+ See changes to v1.14.x + +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + +* Jan 25, 2022 (v1.14.2) + * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) + * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) + * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) + * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) + * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) + * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) + +* Jan 11, 2022 (v1.14.1) + * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) + * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) + * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) + * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) + * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) +
+ +
+ See changes to v1.13.x + +* Aug 30, 2021 (v1.13.5) + * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) + * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) + * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) + * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) + +* Aug 12, 2021 (v1.13.4) + * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). + * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) + +* Aug 3, 2021 (v1.13.3) + * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) + * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) + * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) + * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) + * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) + * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) + +* Jun 14, 2021 (v1.13.1) + * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) + * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) + * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) + * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) + +* Jun 3, 2021 (v1.13.0) + * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. + * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) + * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ + +
+ See changes to v1.12.x
+
+* May 25, 2021 (v1.12.3)
+	* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
+	* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
+	* zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373)
+
+* Apr 27, 2021 (v1.12.2)
+	* zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365)
+	* zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363)
+	* deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367)
+	* s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359)
+	* s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362)
+	* s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368)
+
+* Apr 14, 2021 (v1.12.1)
+	* snappy package removed. Upstream added as dependency.
+	* s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353)
+	* s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352)
+	* s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348)
+	* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
+	* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
+	* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
+
+ +
+ See changes to v1.11.x
+
+* Mar 26, 2021 (v1.11.13)
+	* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
+	* zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336)
+	* deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338)
+	* s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341)
+
+* Mar 5, 2021 (v1.11.12)
+	* s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives).
+	* s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328)
+
+* Mar 1, 2021 (v1.11.9)
+	* s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324)
+	* s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325)
+	* s2: Fix binaries.
+
+* Feb 25, 2021 (v1.11.8)
+	* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
+	* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
+	* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
+	* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
+	* zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313)
+
+* Jan 14, 2021 (v1.11.7)
+	* Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309)
+	* s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310)
+	* s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311)
+	* s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308)
+	* s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312)
+
+* Jan 7, 2021 (v1.11.6)
+	* zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306)
+	* zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305)
+
+* Dec 20, 2020 (v1.11.4)
+	* zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304)
+	* Add header decoder [#299](https://github.com/klauspost/compress/pull/299)
+	* s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297)
+	* Simplify/speed up small blocks with known max size.
[#300](https://github.com/klauspost/compress/pull/300) + * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) + +* Nov 15, 2020 (v1.11.3) + * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) + * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) + +* Oct 11, 2020 (v1.11.2) + * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) + +* Oct 1, 2020 (v1.11.1) + * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) + +* Sept 8, 2020 (v1.11.0) + * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) + * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) + * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) +
+ +
+ See changes to v1.10.x + +* July 8, 2020 (v1.10.11) + * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) + * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) + +* June 23, 2020 (v1.10.10) + * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) + +* June 16, 2020 (v1.10.9): + * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) + * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) + * Fuzzit tests removed. The service has been purchased and is no longer available. + +* June 5, 2020 (v1.10.8): + * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) + +* June 1, 2020 (v1.10.7): + * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) + * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) + * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) + +* May 21, 2020: (v1.10.6) + * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) + * zstd: Stricter decompression checks. + +* April 12, 2020: (v1.10.5) + * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) + +* Apr 8, 2020: (v1.10.4) + * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) +* Mar 11, 2020: (v1.10.3) + * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) + * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) + * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) + * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) + * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) + +* Feb 27, 2020: (v1.10.2) + * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) + * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) + +* Feb 18, 2020: (v1.10.1) + * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) + * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) + * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) + +* Feb 4, 2020: (v1.10.0) + * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) + * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) + * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) + * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) + +
+ +
+ See changes prior to v1.10.0
+
+* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206).
+* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204)
+* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed.
+* Jan 4, 2020: (v1.9.6) Regression in v1.9.5 fixed causing corrupt zstd encodes in rare cases.
+* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192)
+* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder.
+* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199)
+* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features
+* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197)
+* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198)
+* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit.
+* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191)
+* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188)
+* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187)
+* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines.
+* Nov 28, 2019 (v1.9.3) Less allocations in stateless deflate.
+* Nov 28, 2019: 5-20% Faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184)
+* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate.
+* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180)
+* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB.
+* Nov 11, 2019: Reduce inflate memory use by 1KB.
+* Nov 10, 2019: Less allocations in deflate bit writer.
+* Nov 10, 2019: Fix inconsistent error returned by zstd decoder.
+* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174)
+* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173)
+* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172)
+* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105)
+
+ +
+ See changes prior to v1.9.0 + +* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) +* Oct 3, 2019: Fix inconsistent results on broken zstd streams. +* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) +* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). +* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). +* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). +* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. +* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. +* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. +* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. +* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. +* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. +* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. +* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) +* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) +* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) +* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) +* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. +* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. +* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. +* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. +* June 17, 2019: zstd decompression bugfix. +* June 17, 2019: fix 32 bit builds. +* June 17, 2019: Easier use in modules (less dependencies). +* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. +* June 5, 2019: 20-40% throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. +* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. +* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! +* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. +* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. +* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression). 
+* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below.
+* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0).
+* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change.
+* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change.
+* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function.
+* May 28, 2017: Reduce allocations when resetting decoder.
+* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7.
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard-to-compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1, 2, 3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one-byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file +* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x. + +
+
+# deflate usage
+
+The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
+
+| old import         | new import                              | Documentation
+|--------------------|-----------------------------------------|--------------------|
+| `compress/gzip`    | `github.com/klauspost/compress/gzip`    | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
+| `compress/zlib`    | `github.com/klauspost/compress/zlib`    | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
+| `archive/zip`      | `github.com/klauspost/compress/zip`     | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
+| `compress/flate`   | `github.com/klauspost/compress/flate`   | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+
+* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use its godoc: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only minor speedup on decompression (mostly CRC32 calculation).
+
+Memory usage is typically 1MB for a Writer; the stdlib is in the same range.
+If you expect to have a lot of concurrently allocated Writers, consider using
+the stateless compression described below.
+
+For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+
+# Stateless compression
+
+This package offers stateless compression as a special option for gzip/deflate.
+It will do compression but without maintaining any state between Write calls.
+
+This means there will be no memory kept between Write calls, but compression and speed will be suboptimal.
+
+This is only relevant in cases where you expect to run many thousands of compressors concurrently,
+but with very little activity. This is *not* intended for regular web servers serving individual requests.
+
+Because no state is kept between calls, the size of actual Write calls will affect output size.
+
+In gzip, specify level `-3` / `gzip.StatelessCompression` to enable.
+
+For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter).
+
+A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
+
+```
+	// replace 'ioutil.Discard' with your output.
+	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
+	if err != nil {
+		return err
+	}
+	defer gzw.Close()
+
+	w := bufio.NewWriterSize(gzw, 4096)
+	defer w.Flush()
+
+	// Write to 'w'
+```
+
+This will only use up to 4KB in memory when the writer is idle.
+
+Compression is almost always worse than the fastest compression level
+and each write will allocate (a little) memory.
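+
+To make the direct route concrete, here is a minimal, hedged sketch. It assumes only the `NewStatelessWriter` function linked above, which returns an `io.WriteCloser`:
+
+```
+package main
+
+import (
+	"bytes"
+	"log"
+
+	"github.com/klauspost/compress/flate"
+)
+
+func main() {
+	var buf bytes.Buffer
+	// No state is kept between Write calls, so each Write is
+	// compressed as an independent unit.
+	w := flate.NewStatelessWriter(&buf)
+	if _, err := w.Write([]byte("hello stateless world")); err != nil {
+		log.Fatal(err)
+	}
+	if err := w.Close(); err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("compressed to %d bytes", buf.Len())
+}
+```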
+
+# Performance Update 2018
+
+It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my desktop Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
+
+The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
+
+The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - the relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib; negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
+
+The `gzstd` (standard library gzip) and `gzkp` (this package gzip) use only one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
+
+
+## Overall differences
+
+There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
+
+The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library.
+
+This package attempts to provide that smoother transition, where "1" takes a lot of shortcuts, "5" is the reasonable trade-off and "9" gives the best compression, with the values in between giving something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 show no significant gains - often spending a lot more time than can be justified by the achieved compression.
+
+There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
+
+## Web Content
+
+This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
+
+Since levels 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
+
+Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
+
+## Object files
+
+This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
+
+The picture is similar to the web content, but with small differences since this is very compressible. 
Levels 2-3 offer good speed, but sacrifice quite a bit of compression.
+
+The standard library seems suboptimal on levels 3 and 4 - offering both worse compression and speed than levels 6 & 7 of this package respectively.
+
+## Highly Compressible File
+
+This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
+
+It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression.
+
+So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
+
+## Medium-High Compressible
+
+This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
+
+We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
+
+## Medium Compressible
+
+I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
+
+The most notable thing is how quickly the standard library drops to very low compression speeds around levels 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
+
+
+## Un-compressible Content
+
+This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
+
+
+## Huffman only compression
+
+This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
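+
+To make the mode selection concrete, here is a minimal, hedged sketch. It assumes only the `HuffmanOnly` level constant of this package's `flate`, which mirrors the standard library's `compress/flate`:
+
+```
+package main
+
+import (
+	"bytes"
+	"log"
+
+	"github.com/klauspost/compress/flate"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("sample text with common letters "), 512)
+
+	var buf bytes.Buffer
+	// HuffmanOnly disables match searching entirely; only the
+	// per-byte entropy coding step remains.
+	w, err := flate.NewWriter(&buf, flate.HuffmanOnly)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if _, err := w.Write(in); err != nil {
+		log.Fatal(err)
+	}
+	if err := w.Close(); err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("huffman only: %d -> %d bytes", len(in), buf.Len())
+}
+```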
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented in Go 1.7 as "Huffman Only" mode, though it is not exposed for gzip there.
+
+# Other packages
+
+Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code):
+
+* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
+* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
+* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go
new file mode 100644
index 00000000..ea5a692d
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/compressible.go
@@ -0,0 +1,85 @@
+package compress
+
+import "math"
+
+// Estimate returns a normalized compressibility estimate of block b.
+// Values close to zero are likely uncompressible.
+// Values above 0.1 are likely to be compressible.
+// Values above 0.5 are very compressible.
+// Very small lengths will return 0.
+func Estimate(b []byte) float64 {
+	if len(b) < 16 {
+		return 0
+	}
+
+	// Correctly predicted order 1
+	hits := 0
+	lastMatch := false
+	var o1 [256]byte
+	var hist [256]int
+	c1 := byte(0)
+	for _, c := range b {
+		if c == o1[c1] {
+			// We only count a hit if there were two correct predictions in a row.
+			if lastMatch {
+				hits++
+			}
+			lastMatch = true
+		} else {
+			lastMatch = false
+		}
+		o1[c1] = c
+		c1 = c
+		hist[c]++
+	}
+
+	// Use x^0.6 to give better spread
+	prediction := math.Pow(float64(hits)/float64(len(b)), 0.6)
+
+	// Calculate histogram distribution
+	variance := float64(0)
+	avg := float64(len(b)) / 256
+
+	for _, v := range hist {
+		Δ := float64(v) - avg
+		variance += Δ * Δ
+	}
+
+	stddev := math.Sqrt(float64(variance)) / float64(len(b))
+	exp := math.Sqrt(1 / float64(len(b)))
+
+	// Subtract expected stddev
+	stddev -= exp
+	if stddev < 0 {
+		stddev = 0
+	}
+	stddev *= 1 + exp
+
+	// Use x^0.4 to give better spread
+	entropy := math.Pow(stddev, 0.4)
+
+	// 50/50 weight between prediction and histogram distribution
+	return math.Pow((prediction+entropy)/2, 0.9)
+}
+
+// ShannonEntropyBits returns the minimum number of bits required to represent
+// an entropy encoding of the input bytes.
+// https://en.wiktionary.org/wiki/Shannon_entropy
+func ShannonEntropyBits(b []byte) int {
+	if len(b) == 0 {
+		return 0
+	}
+	var hist [256]int
+	for _, c := range b {
+		hist[c]++
+	}
+	shannon := float64(0)
+	invTotal := 1.0 / float64(len(b))
+	for _, v := range hist[:] {
+		if v > 0 {
+			n := float64(v)
+			shannon += math.Ceil(-math.Log2(n*invTotal) * n)
+		}
+	}
+	return int(math.Ceil(shannon))
+}
diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md
new file mode 100644
index 00000000..ea7324da
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/README.md
@@ -0,0 +1,79 @@
+# Finite State Entropy
+
+This package provides Finite State Entropy encoding and decoding.
+
+Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS))
+encoding provides a fast near-optimal symbol encoding/decoding
+for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd).
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step for compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse)
+
+## News
+
+ * Feb 2018: First implementation released. Consider this beta software for now.
+
+# Usage
+
+This package provides a low-level interface that allows compressing single, independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                   |
+|---------------------|-------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                             |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                      |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated   |
+| `(error)`           | An internal error occurred.                                                   |
+
+As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using the previous output
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function.
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples).
+
+# Performance
+
+A lot of factors affect speed. Block sizes and compressibility of the material are primary factors.
+All compression functions currently run on the calling goroutine, so only one core will be used per block.
+
+The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input
+is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be
+beneficial to transpose all your input values down by 64.
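+
+For orientation, here is a minimal, hedged sketch of a compress/decompress round trip. It assumes only the `Compress`, `Decompress` and `Scratch` names described under Usage above:
+
+```
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/fse"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("abcd0123"), 1024)
+
+	var s fse.Scratch
+	comp, err := fse.Compress(in, &s)
+	switch err {
+	case nil:
+		// Compressed output is in comp (backed by s.Out).
+	case fse.ErrIncompressible, fse.ErrUseRLE:
+		// A real caller would store the block uncompressed (or as RLE).
+		log.Fatalf("not entropy coded: %v", err)
+	default:
+		log.Fatal(err)
+	}
+	// Copy before re-using the Scratch, since s.Out is re-used.
+	comp = append([]byte(nil), comp...)
+
+	var d fse.Scratch
+	out, err := fse.Decompress(comp, &d)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(bytes.Equal(in, out), len(comp), "of", len(in), "bytes")
+}
+```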
+
+With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and
+around 300MB/s for decompression.
+
+The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s.
+
+# Plans
+
+At one point, more internals will be exposed to facilitate more "expert" usage of the components.
+
+A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261).
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
\ No newline at end of file
diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go
new file mode 100644
index 00000000..f65eb390
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/bitreader.go
@@ -0,0 +1,122 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
+type bitReader struct {
+	in       []byte
+	off      uint // next byte to read is at in[off - 1]
+	value    uint64
+	bitsRead uint8
+}
+
+// init initializes and resets the bit reader.
+func (b *bitReader) init(in []byte) error {
+	if len(in) < 1 {
+		return errors.New("corrupt stream: too short")
+	}
+	b.in = in
+	b.off = uint(len(in))
+	// The highest bit of the last byte indicates where to start
+	v := in[len(in)-1]
+	if v == 0 {
+		return errors.New("corrupt stream, did not find end of stream")
+	}
+	b.bitsRead = 64
+	b.value = 0
+	if len(in) >= 8 {
+		b.fillFastStart()
+	} else {
+		b.fill()
+		b.fill()
+	}
+	b.bitsRead += 8 - uint8(highBits(uint32(v)))
+	return nil
+}
+
+// getBits will return n bits. n can be 0.
+func (b *bitReader) getBits(n uint8) uint16 {
+	if n == 0 || b.bitsRead >= 64 {
+		return 0
+	}
+	return b.getBitsFast(n)
+}
+
+// getBitsFast requires that at least one bit is requested every time.
+// There are no checks if the buffer is filled.
+func (b *bitReader) getBitsFast(n uint8) uint16 {
+	const regMask = 64 - 1
+	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
+	b.bitsRead += n
+	return v
+}
+
+// fillFast() will make sure at least 32 bits are available.
+// There must be at least 4 bytes available.
+func (b *bitReader) fillFast() {
+	if b.bitsRead < 32 {
+		return
+	}
+	// 2 bounds checks.
+	v := b.in[b.off-4:]
+	v = v[:4]
+	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	b.value = (b.value << 32) | uint64(low)
+	b.bitsRead -= 32
+	b.off -= 4
+}
+
+// fill() will make sure at least 32 bits are available.
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.bitsRead >= 64 && b.off == 0 +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go new file mode 100644 index 00000000..43e46361 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -0,0 +1,168 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import "fmt" + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16ZeroNC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +// This is fastest if bits can be zero. +func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { + if bits == 0 { + return + } + value <<= (16 - bits) & 15 + value >>= (16 - bits) & 15 + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush will flush all pending full bytes. +// There will be at least 56 bits available for writing when this has been called. +// Using flush32 is faster, but leaves less space for writing. 
+func (b *bitWriter) flush() { + v := b.nBits >> 3 + switch v { + case 0: + case 1: + b.out = append(b.out, + byte(b.bitContainer), + ) + case 2: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + ) + case 3: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + ) + case 4: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + ) + case 5: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + ) + case 6: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + ) + case 7: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + ) + case 8: + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24), + byte(b.bitContainer>>32), + byte(b.bitContainer>>40), + byte(b.bitContainer>>48), + byte(b.bitContainer>>56), + ) + default: + panic(fmt.Errorf("bits (%d) > 64", b.nBits)) + } + b.bitContainer >>= v << 3 + b.nBits &= 7 +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go new file mode 100644 index 00000000..abade2d6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/bytereader.go @@ -0,0 +1,47 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// Uint32 returns a little endian uint32 starting at current offset. 
+func (b byteReader) Uint32() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go new file mode 100644 index 00000000..6f341914 --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -0,0 +1,683 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package fse + +import ( + "errors" + "fmt" +) + +// Compress the input bytes. Input must be < 2GB. +// Provide a Scratch buffer to avoid memory allocations. +// Note that the output is also kept in the scratch buffer. +// If input is too hard to compress, ErrIncompressible is returned. +// If input is a single byte value repeated ErrUseRLE is returned. +func Compress(in []byte, s *Scratch) ([]byte, error) { + if len(in) <= 1 { + return nil, ErrIncompressible + } + if len(in) > (2<<30)-1 { + return nil, errors.New("input too big, must be < 2GB") + } + s, err := s.prepare(in) + if err != nil { + return nil, err + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + if maxCount == 0 { + maxCount = s.countSimple(in) + } + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount == len(in) { + // One symbol, use RLE + return nil, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, ErrIncompressible + } + s.optimalTableLog() + err = s.normalizeCount() + if err != nil { + return nil, err + } + err = s.writeCount() + if err != nil { + return nil, err + } + + if false { + err = s.validateNorm() + if err != nil { + return nil, err + } + } + + err = s.buildCTable() + if err != nil { + return nil, err + } + err = s.compress(in) + if err != nil { + return nil, err + } + s.Out = s.bw.out + // Check if we compressed. + if len(s.Out) >= len(in) { + return nil, ErrIncompressible + } + return s.Out, nil +} + +// cState contains the compression state of a stream. +type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + first.deltaFindState + c.state = c.stateTable[lu] +} + +// encode the output symbol provided and write it to the bitstream. +func (c *cState) encode(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16NC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// encode the output symbol provided and write it to the bitstream. 
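+// Unlike encode, it remains correct when a symbol may be written with zero bits (see addBits16ZeroNC).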
+func (c *cState) encodeZero(symbolTT symbolTransform) { + nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 + dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState + c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) + c.state = c.stateTable[dstState] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) + c.bw.flush() +} + +// compress is the main compression loop that will encode the input from the last byte to the first. +func (s *Scratch) compress(src []byte) error { + if len(src) <= 2 { + return errors.New("compress: src too small") + } + tt := s.ct.symbolTT[:256] + s.bw.reset(s.Out) + + // Our two states each encodes every second byte. + // Last byte encoded (first byte decoded) will always be encoded by c1. + var c1, c2 cState + + // Encode so remaining size is divisible by 4. + ip := len(src) + if ip&1 == 1 { + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + c1.encodeZero(tt[src[ip-3]]) + ip -= 3 + } else { + c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) + c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) + ip -= 2 + } + if ip&2 != 0 { + c2.encodeZero(tt[src[ip-1]]) + c1.encodeZero(tt[src[ip-2]]) + ip -= 2 + } + + // Main compression loop. + switch { + case !s.zeroBits && s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush. + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case !s.zeroBits: + // We do not need to check if any output is 0 bits. + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encode(tt[v0]) + c1.encode(tt[v1]) + s.bw.flush32() + c2.encode(tt[v2]) + c1.encode(tt[v3]) + ip -= 4 + } + case s.actualTableLog <= 8: + // We can encode 4 symbols without requiring a flush + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + default: + for ip >= 4 { + s.bw.flush32() + v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + c2.encodeZero(tt[v0]) + c1.encodeZero(tt[v1]) + s.bw.flush32() + c2.encodeZero(tt[v2]) + c1.encodeZero(tt[v3]) + ip -= 4 + } + } + + // Flush final state. + // Used to initialize state when decoding. + c2.flush(s.actualTableLog) + c1.flush(s.actualTableLog) + + return s.bw.close() +} + +// writeCount will write the normalized histogram count to header. +// This is read back by readNCount. 
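+// The header packs the table log into 4 bits; normalized counts follow using a variable number of bits, with runs of zero-probability symbols run-length encoded.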
+func (s *Scratch) writeCount() error {
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+	)
+	if cap(s.Out) < maxHeaderSize {
+		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
+	}
+	outP := uint(0)
+	out := s.Out[:maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
+		}
+		bitStream += uint32(count) << bitCount
+		bitCount += nbBits
+		if count < max {
+			bitCount--
+		}
+
+		previous0 = count == 1
+		if remaining < 1 {
+			return errors.New("internal error: remaining<1")
+		}
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if bitCount > 16 {
+			out[outP] = byte(bitStream)
+			out[outP+1] = byte(bitStream >> 8)
+			outP += 2
+			bitStream >>= 16
+			bitCount -= 16
+		}
+	}
+
+	out[outP] = byte(bitStream)
+	out[outP+1] = byte(bitStream >> 8)
+	outP += (bitCount + 7) / 8
+
+	if charnum > s.symbolLen {
+		return errors.New("internal error: charnum > s.symbolLen")
+	}
+	s.Out = out[:outP]
+	return nil
+}
+
+// symbolTransform contains the state transform for a symbol.
+type symbolTransform struct {
+	deltaFindState int32
+	deltaNbBits    uint32
+}
+
+// String prints values as a human readable string.
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
+}
+
+// cTable contains tables used for compression.
+type cTable struct {
+	tableSymbol []byte
+	stateTable  []uint16
+	symbolTT    []symbolTransform
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *Scratch) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
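+// It computes cumulative symbol positions, spreads the symbols across the state table and derives the per-symbol deltaNbBits/deltaFindState transforms.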
+func (s *Scratch) buildCTable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + var cumul [maxSymbolValue + 2]int16 + + s.allocCtable() + tableSymbol := s.ct.tableSymbol[:tableSize] + // symbol start positions + { + cumul[0] = 0 + for ui, v := range s.norm[:s.symbolLen-1] { + u := byte(ui) // one less than reference + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = u + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + } + // Encode last symbol separately to avoid overflowing u + u := int(s.symbolLen - 1) + v := s.norm[s.symbolLen-1] + if v == -1 { + // Low proba symbol + cumul[u+1] = cumul[u] + 1 + tableSymbol[highThreshold] = byte(u) + highThreshold-- + } else { + cumul[u+1] = cumul[u] + v + } + if uint32(cumul[s.symbolLen]) != tableSize { + return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) + } + cumul[s.symbolLen] = int16(tableSize) + 1 + } + // Spread symbols + s.zeroBits = false + { + step := tableStep(tableSize) + tableMask := tableSize - 1 + var position uint32 + // if any symbol > largeLimit, we may have 0 bits output. + largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = int32(total - 1) + total++ + default: + maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = int32(total - v) + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. +func (s *Scratch) countSimple(in []byte) (max int) { + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m) +} + +// minTableLog provides the minimum logSize to safely represent a distribution. 
+func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 + minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > maxTableLog { + tableLog = maxTableLog + } + s.actualTableLog = tableLog +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +func (s *Scratch) normalizeCount() error { + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(s.br.remain()) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(s.br.remain() >> tableLog) + ) + + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + return s.normalizeCount2() + } + s.norm[largest] += stillToDistribute + return nil +} + +// Secondary normalization method. +// To be used when primary method fails. 
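+// It pins low-count symbols to a minimal probability first, then distributes the remaining probability points proportionally across the rest.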
+func (s *Scratch) normalizeCount2() error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(s.br.remain()) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - sStart + ) + if weight < 1 { + return errors.New("weight < 1") + } + s.norm[i] = int16(weight) + tmpTotal = end + } + } + return nil +} + +// validateNorm validates the normalized histogram table. 
+func (s *Scratch) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	return nil
+}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package fse
+
+import (
+	"errors"
+	"fmt"
+)
+
+const (
+	tablelogAbsoluteMax = 15
+)
+
+// Decompress a block of data.
+// You can provide a scratch buffer to avoid allocations.
+// If nil is provided a temporary one will be allocated.
+// It is possible, but by no means guaranteed, that corrupt data will
+// return an error.
+// It is up to the caller to verify integrity of the returned data.
+// Use a predefined Scratch to set maximum acceptable output size.
+func Decompress(b []byte, s *Scratch) ([]byte, error) {
+	s, err := s.prepare(b)
+	if err != nil {
+		return nil, err
+	}
+	s.Out = s.Out[:0]
+	err = s.readNCount()
+	if err != nil {
+		return nil, err
+	}
+	err = s.buildDtable()
+	if err != nil {
+		return nil, err
+	}
+	err = s.decompress()
+	if err != nil {
+		return nil, err
+	}
+
+	return s.Out, nil
+}
+
+// readNCount will read the symbol distribution so decoding tables can be constructed.
+func (s *Scratch) readNCount() error {
+	var (
+		charnum   uint16
+		previous0 bool
+		b         = &s.br
+	)
+	iend := b.remain()
+	if iend < 4 {
+		return errors.New("input too small")
+	}
+	bitStream := b.Uint32()
+	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
+	if nbBits > tablelogAbsoluteMax {
+		return errors.New("tableLog too large")
+	}
+	bitStream >>= 4
+	bitCount := uint(4)
+
+	s.actualTableLog = uint8(nbBits)
+	remaining := int32((1 << nbBits) + 1)
+	threshold := int32(1 << nbBits)
+	gotTotal := int32(0)
+	nbBits++
+
+	for remaining > 1 {
+		if previous0 {
+			n0 := charnum
+			for (bitStream & 0xFFFF) == 0xFFFF {
+				n0 += 24
+				if b.off < iend-5 {
+					b.advance(2)
+					bitStream = b.Uint32() >> bitCount
+				} else {
+					bitStream >>= 16
+					bitCount += 16
+				}
+			}
+			for (bitStream & 3) == 3 {
+				n0 += 3
+				bitStream >>= 2
+				bitCount += 2
+			}
+			n0 += uint16(bitStream & 3)
+			bitCount += 2
+			if n0 > maxSymbolValue {
+				return errors.New("maxSymbolValue too small")
+			}
+			for charnum < n0 {
+				s.norm[charnum&0xff] = 0
+				charnum++
+			}
+
+			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+				b.advance(bitCount >> 3)
+				bitCount &= 7
+				bitStream = b.Uint32() >> bitCount
+			} else {
+				bitStream >>= 2
+			}
+		}
+
+		max := (2*(threshold) - 1) - (remaining)
+		var count int32
+
+		if (int32(bitStream) & (threshold - 1)) < max {
+			count = int32(bitStream) & (threshold - 1)
+			bitCount += nbBits - 1
+		} else {
+			count = int32(bitStream) & (2*threshold - 1)
+			if count >= threshold {
+				count -= max
+			}
+			bitCount += nbBits
+		}
+
+		count-- // extra accuracy
+		if count < 0 {
+			// -1 means +1
+			remaining += count
+			gotTotal -= count
+		} else {
+			remaining -= count
+			gotTotal += count
+		}
+		s.norm[charnum&0xff] = int16(count)
+		charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+		}
+		bitStream = b.Uint32() >> (bitCount & 31)
+	}
+	s.symbolLen = charnum
+
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return nil
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+type decSymbol struct {
+	newState uint16
+	symbol   uint8
+	nbBits   uint8
+}
+
+// allocDtable will allocate decoding tables if they are not big enough.
+func (s *Scratch) allocDtable() { + tableSize := 1 << s.actualTableLog + if cap(s.decTable) < tableSize { + s.decTable = make([]decSymbol, tableSize) + } + s.decTable = s.decTable[:tableSize] + + if cap(s.ct.tableSymbol) < 256 { + s.ct.tableSymbol = make([]byte, 256) + } + s.ct.tableSymbol = s.ct.tableSymbol[:256] + + if cap(s.ct.stateTable) < 256 { + s.ct.stateTable = make([]uint16, 256) + } + s.ct.stateTable = s.ct.stateTable[:256] +} + +// buildDtable will build the decoding table. +func (s *Scratch) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + s.allocDtable() + symbolNext := s.ct.stateTable[:256] + + // Init, lay down lowprob symbols + s.zeroBits = false + { + largeLimit := int16(1 << (s.actualTableLog - 1)) + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.decTable[highThreshold].symbol = uint8(i) + highThreshold-- + symbolNext[i] = 1 + } else { + if v >= largeLimit { + s.zeroBits = true + } + symbolNext[i] = uint16(v) + } + } + } + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.decTable[position].symbol = uint8(ss) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.decTable { + symbol := v.symbol + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.decTable[u].nbBits = nBits + newState := (nextState << nBits) - tableSize + if newState >= tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.decTable[u].newState = newState + } + } + return nil +} + +// decompress will decompress the bitstream. +// If the buffer is over-read an error is returned. +func (s *Scratch) decompress() error { + br := &s.bits + br.init(s.br.unread()) + + var s1, s2 decoder + // Initialize and decode first state and symbol. + s1.init(br, s.decTable, s.actualTableLog) + s2.init(br, s.decTable, s.actualTableLog) + + // Use temp table to avoid bound checks/append penalty. + var tmp = s.ct.tableSymbol[:256] + var off uint8 + + // Main part + if !s.zeroBits { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.nextFast() + tmp[off+1] = s2.nextFast() + br.fillFast() + tmp[off+2] = s1.nextFast() + tmp[off+3] = s2.nextFast() + off += 4 + // When off is 0, we have overflowed and should write. + if off == 0 { + s.Out = append(s.Out, tmp...) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } else { + for br.off >= 8 { + br.fillFast() + tmp[off+0] = s1.next() + tmp[off+1] = s2.next() + br.fillFast() + tmp[off+2] = s1.next() + tmp[off+3] = s2.next() + off += 4 + if off == 0 { + s.Out = append(s.Out, tmp...) + // When off is 0, we have overflowed and should write. 
+ if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + } + } + s.Out = append(s.Out, tmp[:off]...) + + // Final bits, a bit more expensive check + for { + if s1.finished() { + s.Out = append(s.Out, s1.final(), s2.final()) + break + } + br.fill() + s.Out = append(s.Out, s1.next()) + if s2.finished() { + s.Out = append(s.Out, s2.final(), s1.final()) + break + } + s.Out = append(s.Out, s2.next()) + if len(s.Out) >= s.DecompressLimit { + return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) + } + } + return br.close() +} + +// decoder keeps track of the current state and updates it from the bitstream. +type decoder struct { + state uint16 + br *bitReader + dt []decSymbol +} + +// init will initialize the decoder and read the first state from the stream. +func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { + d.dt = dt + d.br = in + d.state = in.getBits(tableLog) +} + +// next returns the next symbol and sets the next state. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) next() uint8 { + n := &d.dt[d.state] + lowBits := d.br.getBits(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} + +// finished returns true if all bits have been read from the bitstream +// and the next state would require reading bits from the input. +func (d *decoder) finished() bool { + return d.br.finished() && d.dt[d.state].nbBits > 0 +} + +// final returns the current state symbol without decoding the next. +func (d *decoder) final() uint8 { + return d.dt[d.state].symbol +} + +// nextFast returns the next symbol and sets the next state. +// This can only be used if no symbols are 0 bits. +// At least tablelog bits must be available in the bit reader. +func (d *decoder) nextFast() uint8 { + n := d.dt[d.state] + lowBits := d.br.getBitsFast(n.nbBits) + d.state = n.newState + lowBits + return n.symbol +} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go new file mode 100644 index 00000000..535cbadf --- /dev/null +++ b/vendor/github.com/klauspost/compress/fse/fse.go @@ -0,0 +1,144 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +// Package fse provides Finite State Entropy encoding and decoding. +// +// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding +// for byte blocks as implemented in zstd. +// +// See https://github.com/klauspost/compress/tree/master/fse for more information. +package fse + +import ( + "errors" + "fmt" + "math/bits" +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = 14 + defaultMemoryUsage = 13 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + defaultTablelog = defaultMemoryUsage - 2 + minTablelog = 5 + maxSymbolValue = 255 +) + +var ( + // ErrIncompressible is returned when input is judged to be too hard to compress. 
+ ErrIncompressible = errors.New("input is not compressible") + + // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. + ErrUseRLE = errors.New("input is single value repeated") +) + +// Scratch provides temporary storage for compression and decompression. +type Scratch struct { + // Private + count [maxSymbolValue + 1]uint32 + norm [maxSymbolValue + 1]int16 + br byteReader + bits bitReader + bw bitWriter + ct cTable // Compression tables. + decTable []decSymbol // Decompression table. + maxCount int // count of the most probable symbol + + // Per block parameters. + // These can be used to override compression parameters of the block. + // Do not touch, unless you know what you are doing. + + // Out is output buffer. + // If the scratch is re-used before the caller is done processing the output, + // set this field to nil. + // Otherwise the output buffer will be re-used for next Compression/Decompression step + // and allocation will be avoided. + Out []byte + + // DecompressLimit limits the maximum decoded size acceptable. + // If > 0 decompression will stop when approximately this many bytes + // has been decoded. + // If 0, maximum size will be 2GB. + DecompressLimit int + + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + + // MaxSymbolValue will override the maximum symbol value of the next block. + MaxSymbolValue uint8 + + // TableLog will attempt to override the tablelog for the next block. + TableLog uint8 +} + +// Histogram allows to populate the histogram and skip that step in the compression, +// It otherwise allows to inspect the histogram when compression is done. +// To indicate that you have populated the histogram call HistogramFinished +// with the value of the highest populated symbol, as well as the number of entries +// in the most populated entry. These are accepted at face value. +// The returned slice will always be length 256. +func (s *Scratch) Histogram() []uint32 { + return s.count[:] +} + +// HistogramFinished can be called to indicate that the histogram has been populated. +// maxSymbol is the index of the highest set symbol of the next data segment. +// maxCount is the number of entries in the most populated entry. +// These are accepted at face value. +func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { + s.maxCount = maxCount + s.symbolLen = uint16(maxSymbol) + 1 + s.clearCount = maxCount != 0 +} + +// prepare will prepare and allocate scratch tables used for both compression and decompression. +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = 255 + } + if s.TableLog == 0 { + s.TableLog = defaultTablelog + } + if s.TableLog > maxTableLog { + return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + s.br.init(in) + if s.DecompressLimit == 0 { + // Max size 2GB. + s.DecompressLimit = (2 << 30) - 1 + } + + return s, nil +} + +// tableStep returns the next table index. 
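+// For the power-of-two table sizes used here (at least 1<<minTablelog), the
+// step (tableSize*5/8 + 3) is odd and therefore co-prime with the table size,
+// so repeatedly adding it modulo tableSize visits every slot exactly once
+// when spreading symbols.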
+func tableStep(tableSize uint32) uint32 {
+	return (tableSize >> 1) + (tableSize >> 3) + 3
+}
+
+func highBits(val uint32) (n uint32) {
+	return uint32(bits.Len32(val) - 1)
+}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
new file mode 100644
index 00000000..aff94220
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gen.sh
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+cd s2/cmd/_s2sx/ || exit 1
+go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
new file mode 100644
index 00000000..b3d26295
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/.gitignore
@@ -0,0 +1 @@
+/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
new file mode 100644
index 00000000..8b6e5c66
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/README.md
@@ -0,0 +1,89 @@
+# Huff0 entropy compression
+
+This package provides Huff0 encoding and decoding as used in zstd.
+
+[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders)
+is a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operations on multiple ALUs
+(Arithmetic Logic Units) and achieving extremely fast compression and decompression speeds.
+
+This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
+It does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
+but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
+
+* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
+
+## News
+
+This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
+
+This ensures that most functionality is well tested.
+
+# Usage
+
+This package provides a low level interface that allows compressing single independent blocks.
+
+Each block is separate, and there are no built-in integrity checks.
+This means that the caller should keep track of block sizes and also do checksums if needed.
+
+Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
+[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
+You must provide input and will receive the output and maybe an error.
+
+These error values can be returned:
+
+| Error               | Description                                                                  |
+|---------------------|------------------------------------------------------------------------------|
+| `<nil>`             | Everything ok, output is returned                                            |
+| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
+| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
+| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
+| `(error)`           | An internal error occurred.                                                  |
+
+As can be seen above, some of these errors are returned even under normal operation, so it is important to handle them.
+
+To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
+that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
+object can be used for both.
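+
+A minimal round-trip sketch (the sample input is deliberately repetitive so it
+compresses, and the decoded size is assumed known to the caller; only the
+package's documented entry points are used):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+
+	"github.com/klauspost/compress/huff0"
+)
+
+func main() {
+	in := bytes.Repeat([]byte("abbcccdddd"), 100) // repetitive, so it compresses
+	var s huff0.Scratch
+
+	// Compress one stream. With a fresh Scratch there is no previous table,
+	// so the table is stored in front of the compressed data.
+	comp, _, err := huff0.Compress1X(in, &s)
+	if err != nil {
+		log.Fatal(err) // ErrIncompressible / ErrUseRLE are expected for unsuitable input
+	}
+
+	// Read the table back; remain is the data part of the block.
+	dec, remain, err := huff0.ReadTable(comp, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// The capacity of dst tells the decoder the expected output size.
+	out, err := dec.Decoder().Decompress1X(make([]byte, 0, len(in)), remain)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(bytes.Equal(out, in)) // true
+}
+```
+
+`Compress4X` works the same way but splits the block into four streams, which the decoder can interleave for speed.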
+
+Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using it
+you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
+
+The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
+
+## Tables and re-use
+
+Huff0 allows reusing tables from the previous block to save space if that is expected to give better/faster results.
+
+The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
+that controls this behaviour. See the documentation for details. This can be altered between each block.
+
+Do however note that this information is *not* stored in the output block, and it is up to the users of the package to
+record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
+based on the boolean reported back from the CompressXX call.
+
+If you want to store the table separate from the data, you can access them as `OutData` and `OutTable` on the
+[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
+
+## Decompressing
+
+The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
+You can supply the complete block to `ReadTable`, and it will return the data part of the block,
+which can be given to the decompressor.
+
+Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
+or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
+
+For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested, which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
+
+You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
+your input was likely corrupted.
+
+It is important to note that a successful decoding does *not* mean your output matches your original input.
+There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
+
+# Contributing
+
+Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking
+changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
new file mode 100644
index 00000000..504a7be9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -0,0 +1,233 @@
+// Copyright 2018 Klaus Post. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
+
+package huff0
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+)
+
+// bitReader reads a bitstream in reverse.
+// The last set bit indicates the start of the stream and is used
+// for aligning the input.
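+// Reading backwards lets the encoder flush bits forward while the decoder
+// starts at the final byte: the highest set bit of that byte is the end
+// mark, and init skips the padding above it together with the mark itself.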
+type bitReaderBytes struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReaderBytes) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderBytes) peekByteFast() uint8 { + got := uint8(b.value >> 56) + return got +} + +func (b *bitReaderBytes) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderBytes) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. +func (b *bitReaderBytes) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderBytes) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << (b.bitsRead - 32) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReaderBytes) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderBytes) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +// bitReaderShifted reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReaderShifted struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 + bitsRead uint8 +} + +// init initializes and resets the bit reader. 
+func (b *bitReaderShifted) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.advance(8 - uint8(highBit32(uint32(v)))) + return nil +} + +// peekBitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { + return uint16(b.value >> ((64 - n) & 63)) +} + +func (b *bitReaderShifted) advance(n uint8) { + b.bitsRead += n + b.value <<= n & 63 +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReaderShifted) fillFast() { + if b.bitsRead < 32 { + return + } + + // 2 bounds checks. + v := b.in[b.off-4 : b.off] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. +func (b *bitReaderShifted) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. +func (b *bitReaderShifted) fill() { + if b.bitsRead < 32 { + return + } + if b.off > 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value |= uint64(low) << ((b.bitsRead - 32) & 63) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) + b.bitsRead -= 8 + b.off-- + } +} + +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReaderShifted) close() error { + // Release reference. + b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go new file mode 100644 index 00000000..ec71f7a3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -0,0 +1,95 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. 
+// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// encSymbol will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encSymbol(ct cTable, symbol byte) { + enc := ct[symbol] + b.bitContainer |= uint64(enc.val) << (b.nBits & 63) + if false { + if enc.nBits == 0 { + panic("nbits 0") + } + } + b.nBits += enc.nBits +} + +// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { + encA := ct[av] + encB := ct[bv] + sh := b.nBits & 63 + combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) + b.bitContainer |= combined << sh + if false { + if encA.nBits == 0 { + panic("nbitsA 0") + } + if encB.nBits == 0 { + panic("nbitsB 0") + } + } + b.nBits += encA.nBits + encB.nBits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go new file mode 100644 index 00000000..4dcab8d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go @@ -0,0 +1,44 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package huff0 + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// init will initialize the reader and set the input. +func (b *byteReader) init(in []byte) { + b.b = in + b.off = 0 +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + v3 := int32(b.b[b.off+3]) + v2 := int32(b.b[b.off+2]) + v1 := int32(b.b[b.off+1]) + v0 := int32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// Uint32 returns a little endian uint32 starting at current offset. +func (b byteReader) Uint32() uint32 { + v3 := uint32(b.b[b.off+3]) + v2 := uint32(b.b[b.off+2]) + v1 := uint32(b.b[b.off+1]) + v0 := uint32(b.b[b.off]) + return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 +} + +// remain will return the number of bytes remaining. 
+func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go new file mode 100644 index 00000000..4d14542f --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -0,0 +1,730 @@ +package huff0 + +import ( + "fmt" + "math" + "runtime" + "sync" +) + +// Compress1X will compress the input. +// The output can be decoded using Decompress1X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + return compress(in, s, s.compress1X) +} + +// Compress4X will compress the input. The input is split into 4 independent blocks +// and compressed similar to Compress1X. +// The output can be decoded using Decompress4X. +// Supply a Scratch object. The scratch object contains state about re-use, +// So when sharing across independent encodes, be sure to set the re-use policy. +func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { + s, err = s.prepare(in) + if err != nil { + return nil, false, err + } + if false { + // TODO: compress4Xp only slightly faster. + const parallelThreshold = 8 << 10 + if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { + return compress(in, s, s.compress4X) + } + return compress(in, s, s.compress4Xp) + } + return compress(in, s, s.compress4X) +} + +func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { + // Nuke previous table if we cannot reuse anyway. + if s.Reuse == ReusePolicyNone { + s.prevTable = s.prevTable[:0] + } + + // Create histogram, if none was provided. + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return nil, false, ErrIncompressible + } + // One symbol, use RLE + return nil, false, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return nil, false, ErrIncompressible + } + if s.Reuse == ReusePolicyMust && !canReuse { + // We must reuse, but we can't. + return nil, false, ErrIncompressible + } + if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { + keepTable := s.cTable + keepTL := s.actualTableLog + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + s.cTable = keepTable + s.actualTableLog = keepTL + if err == nil && len(s.Out) < wantSize { + s.OutData = s.Out + return s.Out, true, nil + } + if s.Reuse == ReusePolicyMust { + return nil, false, ErrIncompressible + } + // Do not attempt to re-use later. + s.prevTable = s.prevTable[:0] + } + + // Calculate new table. 
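+	// buildCTable selects the table depth (optimalTableLog), sorts symbols
+	// by count (huffSort), then assigns canonical code lengths and values,
+	// capping the code length at the chosen depth via setMaxHeight.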
+ err = s.buildCTable() + if err != nil { + return nil, false, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + if s.Reuse == ReusePolicyAllow && canReuse { + hSize := len(s.Out) + oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) + newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) + if oldSize <= hSize+newSize || hSize+12 >= wantSize { + // Retain cTable even if we re-use. + keepTable := s.cTable + keepTL := s.actualTableLog + + s.cTable = s.prevTable + s.actualTableLog = s.prevTableLog + s.Out, err = compressor(in) + + // Restore ctable. + s.cTable = keepTable + s.actualTableLog = keepTL + if err != nil { + return nil, false, err + } + if len(s.Out) >= wantSize { + return nil, false, ErrIncompressible + } + s.OutData = s.Out + return s.Out, true, nil + } + } + + // Use new table + err = s.cTable.write(s) + if err != nil { + s.OutTable = nil + return nil, false, err + } + s.OutTable = s.Out + + // Compress using new table + s.Out, err = compressor(in) + if err != nil { + s.OutTable = nil + return nil, false, err + } + if len(s.Out) >= wantSize { + s.OutTable = nil + return nil, false, ErrIncompressible + } + // Move current table into previous. + s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] + s.OutData = s.Out[len(s.OutTable):] + return s.Out, false, nil +} + +// EstimateSizes will estimate the data sizes +func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { + s, err = s.prepare(in) + if err != nil { + return 0, 0, 0, err + } + + // Create histogram, if none was provided. + tableSz, dataSz, reuseSz = -1, -1, -1 + maxCount := s.maxCount + var canReuse = false + if maxCount == 0 { + maxCount, canReuse = s.countSimple(in) + } else { + canReuse = s.canUseTable(s.prevTable) + } + + // We want the output size to be less than this: + wantSize := len(in) + if s.WantLogLess > 0 { + wantSize -= wantSize >> s.WantLogLess + } + + // Reset for next run. + s.clearCount = true + s.maxCount = 0 + if maxCount >= len(in) { + if maxCount > len(in) { + return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) + } + if len(in) == 1 { + return 0, 0, 0, ErrIncompressible + } + // One symbol, use RLE + return 0, 0, 0, ErrUseRLE + } + if maxCount == 1 || maxCount < (len(in)>>7) { + // Each symbol present maximum once or too well distributed. + return 0, 0, 0, ErrIncompressible + } + + // Calculate new table. + err = s.buildCTable() + if err != nil { + return 0, 0, 0, err + } + + if false && !s.canUseTable(s.cTable) { + panic("invalid table generated") + } + + tableSz, err = s.cTable.estTableSize(s) + if err != nil { + return 0, 0, 0, err + } + if canReuse { + reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) + } + dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) + + // Restore + return tableSz, dataSz, reuseSz, nil +} + +func (s *Scratch) compress1X(src []byte) ([]byte, error) { + return s.compress1xDo(s.Out, src) +} + +func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) { + var bw = bitWriter{out: dst} + + // N is length divisible by 4. + n := len(src) + n -= n & 3 + cTable := s.cTable[:256] + + // Encode last bytes. 
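+	// The bitstream is consumed in reverse by the decoder, so the 1-3 bytes
+	// that do not fit the 4-byte main loop below are written first and will
+	// be decoded last.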
+ for i := len(src) & 3; i > 0; i-- { + bw.encSymbol(cTable, src[n+i-1]) + } + n -= 4 + if s.actualTableLog <= 8 { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } else { + for ; n >= 0; n -= 4 { + tmp := src[n : n+4] + // tmp should be len 4 + bw.flush32() + bw.encTwoSymbols(cTable, tmp[3], tmp[2]) + bw.flush32() + bw.encTwoSymbols(cTable, tmp[1], tmp[0]) + } + } + err := bw.close() + return bw.out, err +} + +var sixZeros [6]byte + +func (s *Scratch) compress4X(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + segmentSize := (len(src) + 3) / 4 + + // Add placeholder for output length + offsetIdx := len(s.Out) + s.Out = append(s.Out, sixZeros[:]...) + + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + var err error + idx := len(s.Out) + s.Out, err = s.compress1xDo(s.Out, toDo) + if err != nil { + return nil, err + } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + length := len(s.Out) - idx + s.Out[i*2+offsetIdx] = byte(length) + s.Out[i*2+offsetIdx+1] = byte(length >> 8) + } + } + + return s.Out, nil +} + +// compress4Xp will compress 4 streams using separate goroutines. +func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { + if len(src) < 12 { + return nil, ErrIncompressible + } + // Add placeholder for output length + s.Out = s.Out[:6] + + segmentSize := (len(src) + 3) / 4 + var wg sync.WaitGroup + var errs [4]error + wg.Add(4) + for i := 0; i < 4; i++ { + toDo := src + if len(toDo) > segmentSize { + toDo = toDo[:segmentSize] + } + src = src[len(toDo):] + + // Separate goroutine for each block. + go func(i int) { + s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) + wg.Done() + }(i) + } + wg.Wait() + for i := 0; i < 4; i++ { + if errs[i] != nil { + return nil, errs[i] + } + o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } + // Write compressed length as little endian before block. + if i < 3 { + // Last length is not written. + s.Out[i*2] = byte(len(o)) + s.Out[i*2+1] = byte(len(o) >> 8) + } + + // Write output. + s.Out = append(s.Out, o...) + } + return s.Out, nil +} + +// countSimple will create a simple histogram in s.count. +// Returns the biggest count. +// Does not update s.clearCount. 
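+// The reuse result reports whether every symbol present in the input also
+// has a code (nBits != 0) in the previous table, i.e. whether that table
+// could encode this input.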
+func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { + reuse = true + for _, v := range in { + s.count[v]++ + } + m := uint32(0) + if len(s.prevTable) > 0 { + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else { + if s.prevTable[i].nBits == 0 { + reuse = false + } + } + } + } + return int(m), reuse + } + for i, v := range s.count[:] { + if v > m { + m = v + } + if v > 0 { + s.symbolLen = uint16(i) + 1 + } + } + return int(m), false +} + +func (s *Scratch) canUseTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 && c[i].nBits == 0 { + return false + } + } + return true +} + +//lint:ignore U1000 used for debugging +func (s *Scratch) validateTable(c cTable) bool { + if len(c) < int(s.symbolLen) { + return false + } + for i, v := range s.count[:s.symbolLen] { + if v != 0 { + if c[i].nBits == 0 { + return false + } + if c[i].nBits > s.actualTableLog { + return false + } + } + } + return true +} + +// minTableLog provides the minimum logSize to safely represent a distribution. +func (s *Scratch) minTableLog() uint8 { + minBitsSrc := highBit32(uint32(s.br.remain())) + 1 + minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 + if minBitsSrc < minBitsSymbols { + return uint8(minBitsSrc) + } + return uint8(minBitsSymbols) +} + +// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog +func (s *Scratch) optimalTableLog() { + tableLog := s.TableLog + minBits := s.minTableLog() + maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1 + if maxBitsSrc < tableLog { + // Accuracy can be reduced + tableLog = maxBitsSrc + } + if minBits > tableLog { + tableLog = minBits + } + // Need a minimum to safely represent all symbol values + if tableLog < minTablelog { + tableLog = minTablelog + } + if tableLog > tableLogMax { + tableLog = tableLogMax + } + s.actualTableLog = tableLog +} + +type cTableEntry struct { + val uint16 + nBits uint8 + // We have 8 bits extra +} + +const huffNodesMask = huffNodesLen - 1 + +func (s *Scratch) buildCTable() error { + s.optimalTableLog() + s.huffSort() + if cap(s.cTable) < maxSymbolValue+1 { + s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) + } else { + s.cTable = s.cTable[:s.symbolLen] + for i := range s.cTable { + s.cTable[i] = cTableEntry{} + } + } + + var startNode = int16(s.symbolLen) + nonNullRank := s.symbolLen - 1 + + nodeNb := startNode + huffNode := s.nodes[1 : huffNodesLen+1] + + // This overlays the slice above, but allows "-1" index lookups. + // Different from reference implementation. 
+ huffNode0 := s.nodes[0 : huffNodesLen+1] + + for huffNode[nonNullRank].count == 0 { + nonNullRank-- + } + + lowS := int16(nonNullRank) + nodeRoot := nodeNb + lowS - 1 + lowN := nodeNb + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count + huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + lowS -= 2 + for n := nodeNb; n <= nodeRoot; n++ { + huffNode[n].count = 1 << 30 + } + // fake entry, strong barrier + huffNode0[0].count = 1 << 31 + + // create parents + for nodeNb <= nodeRoot { + var n1, n2 int16 + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n1 = lowS + lowS-- + } else { + n1 = lowN + lowN++ + } + if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + n2 = lowS + lowS-- + } else { + n2 = lowN + lowN++ + } + + huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count + huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + nodeNb++ + } + + // distribute weights (unlimited tree height) + huffNode[nodeRoot].nbBits = 0 + for n := nodeRoot - 1; n >= startNode; n-- { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + for n := uint16(0); n <= nonNullRank; n++ { + huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + } + s.actualTableLog = s.setMaxHeight(int(nonNullRank)) + maxNbBits := s.actualTableLog + + // fill result into tree (val, nbBits) + if maxNbBits > tableLogMax { + return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) + } + var nbPerRank [tableLogMax + 1]uint16 + var valPerRank [16]uint16 + for _, v := range huffNode[:nonNullRank+1] { + nbPerRank[v.nbBits]++ + } + // determine stating value per rank + { + min := uint16(0) + for n := maxNbBits; n > 0; n-- { + // get starting value within each rank + valPerRank[n] = min + min += nbPerRank[n] + min >>= 1 + } + } + + // push nbBits per symbol, symbol order + for _, v := range huffNode[:nonNullRank+1] { + s.cTable[v.symbol].nBits = v.nbBits + } + + // assign value within rank, symbol order + t := s.cTable[:s.symbolLen] + for n, val := range t { + nbits := val.nBits & 15 + v := valPerRank[nbits] + t[n].val = v + valPerRank[nbits] = v + 1 + } + + return nil +} + +// huffSort will sort symbols, decreasing order. +func (s *Scratch) huffSort() { + type rankPos struct { + base uint32 + current uint32 + } + + // Clear nodes + nodes := s.nodes[:huffNodesLen+1] + s.nodes = nodes + nodes = nodes[1 : huffNodesLen+1] + + // Sort into buckets based on length of symbol count. 
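+	// A symbol's bucket is the bit length of its count (highBit32(count+1));
+	// buckets are laid out back to back, and a short insertion sort within
+	// each bucket yields an overall decreasing-count order.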
+ var rank [32]rankPos + for _, v := range s.count[:s.symbolLen] { + r := highBit32(v+1) & 31 + rank[r].base++ + } + // maxBitLength is log2(BlockSizeMax) + 1 + const maxBitLength = 18 + 1 + for n := maxBitLength; n > 0; n-- { + rank[n-1].base += rank[n].base + } + for n := range rank[:maxBitLength] { + rank[n].current = rank[n].base + } + for n, c := range s.count[:s.symbolLen] { + r := (highBit32(c+1) + 1) & 31 + pos := rank[r].current + rank[r].current++ + prev := nodes[(pos-1)&huffNodesMask] + for pos > rank[r].base && c > prev.count { + nodes[pos&huffNodesMask] = prev + pos-- + prev = nodes[(pos-1)&huffNodesMask] + } + nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + } +} + +func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { + maxNbBits := s.actualTableLog + huffNode := s.nodes[1 : huffNodesLen+1] + //huffNode = huffNode[: huffNodesLen] + + largestBits := huffNode[lastNonNull].nbBits + + // early exit : no elt > maxNbBits + if largestBits <= maxNbBits { + return largestBits + } + totalCost := int(0) + baseCost := int(1) << (largestBits - maxNbBits) + n := uint32(lastNonNull) + + for huffNode[n].nbBits > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) + huffNode[n].nbBits = maxNbBits + n-- + } + // n stops at huffNode[n].nbBits <= maxNbBits + + for huffNode[n].nbBits == maxNbBits { + n-- + } + // n end at index of smallest symbol using < maxNbBits + + // renorm totalCost + totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ + + // repay normalized cost + { + const noSymbol = 0xF0F0F0F0 + var rankLast [tableLogMax + 2]uint32 + + for i := range rankLast[:] { + rankLast[i] = noSymbol + } + + // Get pos of last (smallest) symbol per rank + { + currentNbBits := maxNbBits + for pos := int(n); pos >= 0; pos-- { + if huffNode[pos].nbBits >= currentNbBits { + continue + } + currentNbBits = huffNode[pos].nbBits // < maxNbBits + rankLast[maxNbBits-currentNbBits] = uint32(pos) + } + } + + for totalCost > 0 { + nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 + + for ; nBitsToDecrease > 1; nBitsToDecrease-- { + highPos := rankLast[nBitsToDecrease] + lowPos := rankLast[nBitsToDecrease-1] + if highPos == noSymbol { + continue + } + if lowPos == noSymbol { + break + } + highTotal := huffNode[highPos].count + lowTotal := 2 * huffNode[lowPos].count + if highTotal <= lowTotal { + break + } + } + // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
+ // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary + // FIXME: try to remove + for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { + nBitsToDecrease++ + } + totalCost -= 1 << (nBitsToDecrease - 1) + if rankLast[nBitsToDecrease-1] == noSymbol { + // this rank is no longer empty + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] + } + huffNode[rankLast[nBitsToDecrease]].nbBits++ + if rankLast[nBitsToDecrease] == 0 { + /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol + } else { + rankLast[nBitsToDecrease]-- + if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ + } + } + } + + for totalCost < 0 { /* Sometimes, cost correction overshoot */ + if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + for huffNode[n].nbBits == maxNbBits { + n-- + } + huffNode[n+1].nbBits-- + rankLast[1] = n + 1 + totalCost++ + continue + } + huffNode[rankLast[1]+1].nbBits-- + rankLast[1]++ + totalCost++ + } + } + return maxNbBits +} + +type nodeElt struct { + count uint32 + parent uint16 + symbol byte + nbBits uint8 +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go new file mode 100644 index 00000000..42a237ea --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -0,0 +1,1167 @@ +package huff0 + +import ( + "errors" + "fmt" + "io" + "sync" + + "github.com/klauspost/compress/fse" +) + +type dTable struct { + single []dEntrySingle +} + +// single-symbols decoding +type dEntrySingle struct { + entry uint16 +} + +// Uses special code for all tables that are < 8 bits. +const use8BitTables = true + +// ReadTable will read a table from the input. +// The size of the input may be larger than the table definition. +// Any content remaining after the table definition will be returned. +// If no Scratch is provided a new one is allocated. +// The returned Scratch can be used for encoding or decoding input using this table. 
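+// The table header follows zstd's Huffman weight encoding: a first byte of
+// 128 or more means (iSize-127) direct 4-bit weights follow, packed two per
+// byte; a smaller first byte means iSize bytes of FSE-compressed weights.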
+func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { + s, err = s.prepare(nil) + if err != nil { + return s, nil, err + } + if len(in) <= 1 { + return s, nil, errors.New("input too small for table") + } + iSize := in[0] + in = in[1:] + if iSize >= 128 { + // Uncompressed + oSize := iSize - 127 + iSize = (oSize + 1) / 2 + if int(iSize) > len(in) { + return s, nil, errors.New("input too small for table") + } + for n := uint8(0); n < oSize; n += 2 { + v := in[n/2] + s.huffWeight[n] = v >> 4 + s.huffWeight[n+1] = v & 15 + } + s.symbolLen = uint16(oSize) + in = in[iSize:] + } else { + if len(in) < int(iSize) { + return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) + } + // FSE compressed weights + s.fse.DecompressLimit = 255 + hw := s.huffWeight[:] + s.fse.Out = hw + b, err := fse.Decompress(in[:iSize], s.fse) + s.fse.Out = nil + if err != nil { + return s, nil, err + } + if len(b) > 255 { + return s, nil, errors.New("corrupt input: output table too large") + } + s.symbolLen = uint16(len(b)) + in = in[iSize:] + } + + // collect weight stats + var rankStats [16]uint32 + weightTotal := uint32(0) + for _, v := range s.huffWeight[:s.symbolLen] { + if v > tableLogMax { + return s, nil, errors.New("corrupt input: weight too large") + } + v2 := v & 15 + rankStats[v2]++ + // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. + weightTotal += (1 << v2) >> 1 + } + if weightTotal == 0 { + return s, nil, errors.New("corrupt input: weights zero") + } + + // get last non-null symbol weight (implied, total must be 2^n) + { + tableLog := highBit32(weightTotal) + 1 + if tableLog > tableLogMax { + return s, nil, errors.New("corrupt input: tableLog too big") + } + s.actualTableLog = uint8(tableLog) + // determine last weight + { + total := uint32(1) << tableLog + rest := total - weightTotal + verif := uint32(1) << highBit32(rest) + lastWeight := highBit32(rest) + 1 + if verif != rest { + // last value must be a clean power of 2 + return s, nil, errors.New("corrupt input: last value not power of two") + } + s.huffWeight[s.symbolLen] = uint8(lastWeight) + s.symbolLen++ + rankStats[lastWeight]++ + } + } + + if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { + // by construction : at least 2 elts of rank 1, must be even + return s, nil, errors.New("corrupt input: min elt size, even check failed ") + } + + // TODO: Choose between single/double symbol decoding + + // Calculate starting value for each rank + { + var nextRankStart uint32 + for n := uint8(1); n < s.actualTableLog+1; n++ { + current := nextRankStart + nextRankStart += rankStats[n] << (n - 1) + rankStats[n] = current + } + } + + // fill DTable (always full size) + tSize := 1 << tableLogMax + if len(s.dt.single) != tSize { + s.dt.single = make([]dEntrySingle, tSize) + } + cTable := s.prevTable + if cap(cTable) < maxSymbolValue+1 { + cTable = make([]cTableEntry, 0, maxSymbolValue+1) + } + cTable = cTable[:maxSymbolValue+1] + s.prevTable = cTable[:s.symbolLen] + s.prevTableLog = s.actualTableLog + + for n, w := range s.huffWeight[:s.symbolLen] { + if w == 0 { + cTable[n] = cTableEntry{ + val: 0, + nBits: 0, + } + continue + } + length := (uint32(1) << w) >> 1 + d := dEntrySingle{ + entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), + } + + rank := &rankStats[w] + cTable[n] = cTableEntry{ + val: uint16(*rank >> (w - 1)), + nBits: uint8(d.entry), + } + + single := s.dt.single[*rank : *rank+length] + for i := range single { + single[i] = d + } + *rank += length + } + + 
return s, in, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { + if cap(s.Out) < s.MaxDecodedSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:s.MaxDecodedSize] + s.Out, err = s.Decoder().Decompress1X(s.Out, in) + return s.Out, err +} + +// Decompress4X will decompress a 4X encoded stream. +// Before this is called, the table must be initialized with ReadTable unless +// the encoder re-used the table. +// The length of the supplied input must match the end of a block exactly. +// The destination size of the uncompressed data must be known and provided. +// deprecated: Use the stateless Decoder() to get a concurrent version. +func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { + if dstSize > s.MaxDecodedSize { + return nil, ErrMaxDecodedSizeExceeded + } + if cap(s.Out) < dstSize { + s.Out = make([]byte, s.MaxDecodedSize) + } + s.Out = s.Out[:0:dstSize] + s.Out, err = s.Decoder().Decompress4X(s.Out, in) + return s.Out, err +} + +// Decoder will return a stateless decoder that can be used by multiple +// decompressors concurrently. +// Before this is called, the table must be initialized with ReadTable. +// The Decoder is still linked to the scratch buffer so that cannot be reused. +// However, it is safe to discard the scratch. +func (s *Scratch) Decoder() *Decoder { + return &Decoder{ + dt: s.dt, + actualTableLog: s.actualTableLog, + bufs: &s.decPool, + } +} + +// Decoder provides stateless decoding. +type Decoder struct { + dt dTable + actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress1X8BitExactly(dst, src) + } + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + switch d.actualTableLog { + case 8: + const shift = 8 - 8 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 7: + const shift = 8 - 7 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 6: + const shift = 8 - 6 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 5: + const shift = 8 - 5 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 4: + const shift = 8 - 4 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 3: + const shift = 8 - 3 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + case 2: + const shift = 8 - 2 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + case 1: + const shift = 8 - 1 + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>(56+shift))] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + default: + d.bufs.Put(bufs) + return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + shift := (8 - d.actualTableLog) & 7 + + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()>>shift] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { + var br bitReaderBytes + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + dt := d.dt.single[:256] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + const shift = 56 + + //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) + for br.off >= 4 { + br.fillFast() + v := dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[uint8(br.value>>shift)] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) 
+ } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 4, so uint8 is fine + bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) + for bitsLeft > 0 { + if br.bitsRead >= 64-8 { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := dt[br.peekByteFast()] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= int8(nBits) + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { + if d.actualTableLog == 8 { + return d.decompress4X8bitExactly(dst, src) + } + + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + shift := (56 + (8 - d.actualTableLog)) & 63 + + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
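The four input streams that the interleaved loop below consumes are located by the 6-byte jump table parsed at the top of decompress4X8bit above: three little-endian uint16 lengths, with the fourth stream taking whatever remains. A standalone sketch of just that split (hypothetical helper; a fragment assuming an "errors" import):

// splitStreams mirrors the jump-table parsing above: src[0:6] holds three
// little-endian uint16 lengths; stream 3 is the remainder of the input.
func splitStreams(src []byte) (streams [4][]byte, err error) {
	if len(src) < 6+4 {
		return streams, errors.New("input too small")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | int(src[i*2+1])<<8
		if start+length >= len(src) {
			return streams, errors.New("truncated input (or invalid offset)")
		}
		streams[i] = src[start : start+length]
		start += length
	}
	streams[3] = src[start:]
	return streams, nil
}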
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + // Decode remaining. 
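All of these inner loops share the same buffering trick: `off` is a uint8, so `off += 4` wraps back to zero after exactly 256 bytes, and `if off == 0` is the "temp buffer full, flush it" test. A runnable illustration of just the wraparound (demo only, not part of the package):

package main

import "fmt"

func main() {
	var off uint8
	flushes := 0
	for i := 0; i < 256; i++ { // 256 iterations of 4 symbols each
		off += 4
		if off == 0 { // wrapped: exactly 256 bytes since the last flush
			flushes++
		}
	}
	fmt.Println(flushes) // 4
}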
+ remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + if br.finished() { + d.bufs.Put(buf) + return nil, io.ErrUnexpectedEOF + } + if br.bitsRead >= 56 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value |= uint64(low) << (br.bitsRead - 32) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) + br.bitsRead -= 8 + br.off-- + } + } + } + // end inline... + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. + v := single[uint8(br.value>>shift)].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + d.bufs.Put(buf) + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { + var br [4]bitReaderBytes + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const shift = 56 + const tlSize = 1 << 8 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 4 values from each decoder/loop. + const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + // Interleave 2 decodes. 
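One more specialization note before the interleaved body below: in the `Exactly` variants (tablelog == 8) the peek needs no variable shift at all, because the top byte of the bit buffer is the table index directly, hence the `const shift = 56` above. A one-line sketch of that specialized peek (hypothetical helper):

// peekByte returns the top 8 bits of a left-justified 64-bit bit buffer;
// with a full 8-bit tablelog this indexes the 256-entry table directly.
func peekByte(value uint64) uint8 { return uint8(value >> 56) }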
+ const stream = 0 + const stream2 = 1 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() + + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) + + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) + } + + off += 4 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + // copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. 
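The flush code above and the remainder loop below rely on the 4X output layout: the destination is treated as four nearly equal quarters of `dstEvery = (dstSize + 3) / 4` bytes, with stream i writing from offset `dstEvery*i`. A hedged sketch of those bounds (hypothetical helper, mirroring the arithmetic in this diff):

// quarterBounds computes where stream i may write, mirroring the
// dstEvery arithmetic above; the last quarter can be short.
func quarterBounds(dstSize, i int) (start, end int) {
	dstEvery := (dstSize + 3) / 4
	start = dstEvery * i
	end = start + dstEvery
	if end > dstSize {
		end = dstSize
	}
	return start, end
}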
+	remainBytes := dstEvery - (decoded / 4)
+	for i := range br {
+		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
+		br := &br[i]
+		bitsLeft := br.remaining()
+		for bitsLeft > 0 {
+			if br.finished() {
+				d.bufs.Put(buf)
+				return nil, io.ErrUnexpectedEOF
+			}
+			if br.bitsRead >= 56 {
+				if br.off >= 4 {
+					v := br.in[br.off-4:]
+					v = v[:4]
+					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+					br.value |= uint64(low) << (br.bitsRead - 32)
+					br.bitsRead -= 32
+					br.off -= 4
+				} else {
+					for br.off > 0 {
+						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
+						br.bitsRead -= 8
+						br.off--
+					}
+				}
+			}
+			// end inline...
+			if offset >= endsAt {
+				d.bufs.Put(buf)
+				return nil, errors.New("corruption detected: stream overrun 4")
+			}
+
+			// Read value and increment offset.
+			v := single[br.peekByteFast()].entry
+			nBits := uint8(v)
+			br.advance(nBits)
+			bitsLeft -= uint(nBits)
+			out[offset] = uint8(v >> 8)
+			offset++
+		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
+
+		decoded += offset - dstEvery*i
+		err = br.close()
+		if err != nil {
+			d.bufs.Put(buf)
+			return nil, err
+		}
+	}
+	d.bufs.Put(buf)
+	if dstSize != decoded {
+		return nil, errors.New("corruption detected: short output block")
+	}
+	return dst, nil
+}
+
+// matches will compare a decoding table to a coding table.
+// Errors are written to the writer.
+// Nothing will be written if table is ok.
+func (s *Scratch) matches(ct cTable, w io.Writer) {
+	if s == nil || len(s.dt.single) == 0 {
+		return
+	}
+	dt := s.dt.single[:1<<s.actualTableLog]
+	tablelog := s.actualTableLog
+	ok := 0
+	broken := 0
+	for sym, enc := range ct {
+		errs := 0
+		broken++
+		if enc.nBits == 0 {
+			for _, dec := range dt {
+				if uint8(dec.entry>>8) == byte(sym) {
+					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
+					errs++
+					break
+				}
+			}
+			if errs == 0 {
+				broken--
+			}
+			continue
+		}
+		// Unused bits in input
+		ub := tablelog - enc.nBits
+		top := enc.val << ub
+		// decoder looks at top bits.
+		dec := dt[top]
+		if uint8(dec.entry) != enc.nBits {
+			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
+			errs++
+		}
+		if uint8(dec.entry>>8) != uint8(sym) {
+			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
+			errs++
+		}
+		if errs > 0 {
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
+			continue
+		}
+		// Ensure that all combinations are covered.
+		for i := uint16(0); i < (1 << ub); i++ {
+			vval := top | i
+			dec := dt[vval]
+			if uint8(dec.entry) != enc.nBits {
+				fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry))
+				errs++
+			}
+			if uint8(dec.entry>>8) != uint8(sym) {
+				fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8))
+				errs++
+			}
+			if errs > 20 {
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
+				break
+			}
+		}
+		if errs == 0 {
+			ok++
+			broken--
+		}
+	}
+	if broken > 0 {
+		fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok)
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
new file mode 100644
index 00000000..ba7e8e6b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -0,0 +1,226 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// This file contains the specialisation of Decoder.Decompress4X
+// and Decoder.Decompress1X that use an asm implementation of their main loops.
+package huff0 + +import ( + "errors" + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog > 8. +// +//go:noescape +func decompress4x_main_loop_amd64(ctx *decompress4xContext) + +// decompress4x_8b_loop_x86 is an x86 assembler implementation +// of Decompress4X when tablelog <= 8 which decodes 4 entries +// per loop. +// +//go:noescape +func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) + +// fallback8BitSize is the size where using Go version is faster. +const fallback8BitSize = 800 + +type decompress4xContext struct { + pbr *[4]bitReaderShifted + peekBits uint8 + out *byte + dstEvery int + tbl *dEntrySingle + decoded int + limit *byte +} + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + + use8BitTables := d.actualTableLog <= 8 + if cap(dst) < fallback8BitSize && use8BitTables { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + var decoded int + + if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { + ctx := decompress4xContext{ + pbr: &br, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + out: &out[0], + dstEvery: dstEvery, + tbl: &single[0], + limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. + } + if use8BitTables { + decompress4x_8b_main_loop_amd64(&ctx) + } else { + decompress4x_main_loop_amd64(&ctx) + } + + decoded = ctx.decoded + out = out[decoded/4:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// decompress4x_main_loop_x86 is an x86 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_amd64(ctx *decompress1xContext) + +// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation +// of Decompress1X when tablelog > 8. +// +//go:noescape +func decompress1x_main_loop_bmi2(ctx *decompress1xContext) + +type decompress1xContext struct { + pbr *bitReaderShifted + peekBits uint8 + out *byte + outCap int + tbl *dEntrySingle + decoded int +} + +// Error reported by asm implementations +const error_max_decoded_size_exeeded = -1 + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:maxDecodedSize] + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + + if maxDecodedSize >= 4 { + ctx := decompress1xContext{ + pbr: &br, + out: &dst[0], + outCap: maxDecodedSize, + peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() + tbl: &d.dt.single[0], + } + + if cpuinfo.HasBMI2() { + decompress1x_main_loop_bmi2(&ctx) + } else { + decompress1x_main_loop_amd64(&ctx) + } + if ctx.decoded == error_max_decoded_size_exeeded { + return nil, ErrMaxDecodedSizeExceeded + } + + dst = dst[:ctx.decoded] + } + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if len(dst) >= maxDecodedSize { + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s new file mode 100644 index 00000000..8d2187a2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -0,0 +1,846 @@ +// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. 
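Before the generated assembly listing, a note on the calling convention used by decompress_amd64.go above: each asm routine takes a single pointer to a context struct, reads its inputs at fixed field offsets, and writes `decoded` (or -1 on error) back into it. A miniature of the same pattern, assuming a hand-written .s file provides addAsm (hypothetical names; a sketch of the pattern, not this package's API, and not runnable without the .s file):

// addContext mirrors the decompress4xContext/decompress1xContext idea:
// plain fields the assembly reads and writes at known offsets.
type addContext struct {
	a, b int
	sum  int
}

//go:noescape
func addAsm(ctx *addContext) // assumed to be implemented in an accompanying .s file

func add(a, b int) int {
	ctx := addContext{a: a, b: b}
	addAsm(&ctx)
	return ctx.sum
}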
+ +//go:build amd64 && !appengine && !noasm && gc + +// func decompress4x_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), AX + MOVBQZX 8(AX), DI + MOVQ 16(AX), SI + MOVQ 48(AX), BX + MOVQ 24(AX), R9 + MOVQ 32(AX), R10 + MOVQ (AX), R11 + + // Main loop +main_loop: + MOVQ SI, R8 + CMPQ R8, BX + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ (R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 24(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 48(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 72(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 96(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 120(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // 
out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), AX + SUBQ $0x20, R13 + SUBQ $0x04, AX + MOVQ 144(R11), R14 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (AX)(R14*1), R14 + MOVQ R13, CX + SHLQ CL, R14 + MOVQ AX, 168(R11) + ORQ R14, R12 + + // exhausted = exhausted || (br3.off < 4) + CMPQ AX, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ DI, CX + MOVQ R12, R14 + SHRQ CL, R14 + + // v1 := table[val1&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry)) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // these two writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + MOVW AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x02, SI + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), SI + SHLQ $0x02, SI + MOVQ SI, 40(AX) + RET + +// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) +TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 + XORQ DX, DX + + // Preload values + MOVQ ctx+0(FP), CX + MOVBQZX 8(CX), DI + MOVQ 16(CX), BX + MOVQ 48(CX), SI + MOVQ 24(CX), R9 + MOVQ 32(CX), R10 + MOVQ (CX), R11 + + // Main loop +main_loop: + MOVQ BX, R8 + CMPQ R8, SI + SETGE DL + + // br0.fillFast32() + MOVQ 32(R11), R12 + MOVBQZX 40(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill0 + MOVQ 24(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ (R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 24(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br0.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill0: + // val0 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br0.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br0.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 32(R11) + MOVB R13, 40(R11) + ADDQ R9, R8 + + // br1.fillFast32() + MOVQ 80(R11), R12 + MOVBQZX 88(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill1 + MOVQ 72(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, 
R14 + MOVQ 48(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 72(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br1.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill1: + // val0 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br1.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br1.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 80(R11) + MOVB R13, 88(R11) + ADDQ R9, R8 + + // br2.fillFast32() + MOVQ 128(R11), R12 + MOVBQZX 136(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill2 + MOVQ 120(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 96(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 120(R11) + ORQ R15, R12 + + // exhausted = exhausted || (br2.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill2: + // val0 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br2.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br2.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 128(R11) + MOVB R13, 136(R11) + ADDQ R9, R8 + + // br3.fillFast32() + MOVQ 176(R11), R12 + MOVBQZX 184(R11), R13 + CMPQ R13, $0x20 + JBE skip_fill3 + MOVQ 168(R11), R14 + SUBQ $0x20, R13 + SUBQ $0x04, R14 + MOVQ 144(R11), R15 + + // b.value |= uint64(low) << (b.bitsRead & 63) + MOVL (R14)(R15*1), R15 + MOVQ R13, CX + SHLQ CL, R15 + MOVQ R14, 168(R11) + ORQ R15, R12 + + // exhausted = exhausted || 
(br3.off < 4) + CMPQ R14, $0x04 + SETLT AL + ORB AL, DL + +skip_fill3: + // val0 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v0 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v0.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + + // val1 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v1 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v1.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // val2 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v2 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v2.entry) + MOVB CH, AH + SHLQ CL, R12 + ADDB CL, R13 + + // val3 := br3.peekTopBits(peekBits) + MOVQ R12, R14 + MOVQ DI, CX + SHRQ CL, R14 + + // v3 := table[val0&mask] + MOVW (R10)(R14*2), CX + + // br3.advance(uint8(v3.entry) + MOVB CH, AL + SHLQ CL, R12 + ADDB CL, R13 + BSWAPL AX + + // these four writes get coalesced + // out[id * dstEvery + 0] = uint8(v0.entry >> 8) + // out[id * dstEvery + 1] = uint8(v1.entry >> 8) + // out[id * dstEvery + 3] = uint8(v2.entry >> 8) + // out[id * dstEvery + 4] = uint8(v3.entry >> 8) + MOVL AX, (R8) + + // update the bitreader structure + MOVQ R12, 176(R11) + MOVB R13, 184(R11) + ADDQ $0x04, BX + TESTB DL, DL + JZ main_loop + MOVQ ctx+0(FP), AX + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) + RET + +// func decompress1x_main_loop_amd64(ctx *decompress1xContext) +TEXT ·decompress1x_main_loop_amd64(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ (CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_1_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), R12 + MOVQ R11, CX + SHLQ CL, R12 + ORQ R12, R10 + +bitReader_fillFast_2_end: + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + MOVQ DI, CX + MOVQ R10, R12 + SHRQ CL, R12 + MOVW (SI)(R12*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLQ CL, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET + +// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) +// Requires: BMI2 +TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 + MOVQ ctx+0(FP), CX + MOVQ 16(CX), DX + MOVQ 24(CX), BX + CMPQ BX, $0x04 + JB error_max_decoded_size_exeeded + LEAQ (DX)(BX*1), BX + MOVQ 
(CX), SI + MOVQ (SI), R8 + MOVQ 24(SI), R9 + MOVQ 32(SI), R10 + MOVBQZX 40(SI), R11 + MOVQ 32(CX), SI + MOVBQZX 8(CX), DI + JMP loop_condition + +main_loop: + // Check if we have room for 4 bytes in the output buffer + LEAQ 4(DX), CX + CMPQ CX, BX + JGE error_max_decoded_size_exeeded + + // Decode 4 values + CMPQ R11, $0x20 + JL bitReader_fillFast_1_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_1_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + CMPQ R11, $0x20 + JL bitReader_fillFast_2_end + SUBQ $0x20, R11 + SUBQ $0x04, R9 + MOVL (R8)(R9*1), CX + SHLXQ R11, CX, CX + ORQ CX, R10 + +bitReader_fillFast_2_end: + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AH + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + SHRXQ DI, R10, CX + MOVW (SI)(CX*2), CX + MOVB CH, AL + MOVBQZX CL, CX + ADDQ CX, R11 + SHLXQ CX, R10, R10 + BSWAPL AX + + // Store the decoded values + MOVL AX, (DX) + ADDQ $0x04, DX + +loop_condition: + CMPQ R9, $0x08 + JGE main_loop + + // Update ctx structure + MOVQ ctx+0(FP), AX + SUBQ 16(AX), DX + MOVQ DX, 40(AX) + MOVQ (AX), AX + MOVQ R9, 24(AX) + MOVQ R10, 32(AX) + MOVB R11, 40(AX) + RET + + // Report error +error_max_decoded_size_exeeded: + MOVQ ctx+0(FP), AX + MOVQ $-1, CX + MOVQ CX, 40(AX) + RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go new file mode 100644 index 00000000..908c17de --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go @@ -0,0 +1,299 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +// This file contains a generic implementation of Decoder.Decompress4X. +package huff0 + +import ( + "errors" + "fmt" +) + +// Decompress4X will decompress a 4X encoded stream. +// The length of the supplied input must match the end of a block exactly. +// The *capacity* of the dst slice must match the destination size of +// the uncompressed data exactly. +func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if len(src) < 6+(4*1) { + return nil, errors.New("input too small") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress4X8bit(dst, src) + } + + var br [4]bitReaderShifted + // Decode "jump table" + start := 6 + for i := 0; i < 3; i++ { + length := int(src[i*2]) | (int(src[i*2+1]) << 8) + if start+length >= len(src) { + return nil, errors.New("truncated input (or invalid offset)") + } + err := br[i].init(src[start : start+length]) + if err != nil { + return nil, err + } + start += length + } + err := br[3].init(src[start:]) + if err != nil { + return nil, err + } + + // destination, offset to match first output + dstSize := cap(dst) + dst = dst[:dstSize] + out := dst + dstEvery := (dstSize + 3) / 4 + + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + single := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + buf := d.buffer() + var off uint8 + var decoded int + + // Decode 2 values from each decoder/loop. 
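The loop whose body begins below decodes two symbols from each of the four streams per iteration, and pairs the streams up so that two independent table lookups are in flight at once; the point of the interleaving is to hide the load latency of one lookup behind the other. A hedged sketch of one interleaved step (the bitPeeker interface is hypothetical, invented for illustration):

// bitPeeker is a hypothetical stand-in for the package's bit readers.
type bitPeeker interface {
	peekBits(tableLog uint8) uint16
	advance(nBits uint8)
}

// step decodes one symbol from each of two independent streams; because
// v1 and v2 do not depend on each other, the CPU can overlap the lookups.
func step(br1, br2 bitPeeker, dt []uint16, tableLog uint8) (sym1, sym2 byte) {
	v1 := dt[br1.peekBits(tableLog)]
	v2 := dt[br2.peekBits(tableLog)]
	br1.advance(uint8(v1)) // low byte of the entry is the bit length
	br2.advance(uint8(v2))
	return byte(v1 >> 8), byte(v2 >> 8) // high byte is the symbol
}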
+ const bufoff = 256 + for { + if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { + break + } + + { + const stream = 0 + const stream2 = 1 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + { + const stream = 2 + const stream2 = 3 + br[stream].fillFast() + br[stream2].fillFast() + + val := br[stream].peekBitsFast(d.actualTableLog) + val2 := br[stream2].peekBitsFast(d.actualTableLog) + v := single[val&tlMask] + v2 := single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) + + val = br[stream].peekBitsFast(d.actualTableLog) + val2 = br[stream2].peekBitsFast(d.actualTableLog) + v = single[val&tlMask] + v2 = single[val2&tlMask] + br[stream].advance(uint8(v.entry)) + br[stream2].advance(uint8(v2.entry)) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) + } + + off += 2 + + if off == 0 { + if bufoff > dstEvery { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 1") + } + // There must at least be 3 buffers left. + if len(out)-bufoff < dstEvery*3 { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 2") + } + //copy(out, buf[0][:]) + //copy(out[dstEvery:], buf[1][:]) + //copy(out[dstEvery*2:], buf[2][:]) + //copy(out[dstEvery*3:], buf[3][:]) + *(*[bufoff]byte)(out) = buf[0] + *(*[bufoff]byte)(out[dstEvery:]) = buf[1] + *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] + *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] + out = out[bufoff:] + decoded += bufoff * 4 + } + } + if off > 0 { + ioff := int(off) + if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 3") + } + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) + decoded += int(off) * 4 + out = out[off:] + } + + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) + for i := range br { + offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } + br := &br[i] + bitsLeft := br.remaining() + for bitsLeft > 0 { + br.fill() + if offset >= endsAt { + d.bufs.Put(buf) + return nil, errors.New("corruption detected: stream overrun 4") + } + + // Read value and increment offset. 
+ val := br.peekBitsFast(d.actualTableLog) + v := single[val&tlMask].entry + nBits := uint8(v) + br.advance(nBits) + bitsLeft -= uint(nBits) + out[offset] = uint8(v >> 8) + offset++ + } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i + err = br.close() + if err != nil { + return nil, err + } + } + d.bufs.Put(buf) + if dstSize != decoded { + return nil, errors.New("corruption detected: short output block") + } + return dst, nil +} + +// Decompress1X will decompress a 1X encoded stream. +// The cap of the output buffer will be the maximum decompressed size. +// The length of the supplied input must match the end of a block exactly. +func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { + if len(d.dt.single) == 0 { + return nil, errors.New("no table loaded") + } + if use8BitTables && d.actualTableLog <= 8 { + return d.decompress1X8Bit(dst, src) + } + var br bitReaderShifted + err := br.init(src) + if err != nil { + return dst, err + } + maxDecodedSize := cap(dst) + dst = dst[:0] + + // Avoid bounds check by always having full sized table. + const tlSize = 1 << tableLogMax + const tlMask = tlSize - 1 + dt := d.dt.single[:tlSize] + + // Use temp table to avoid bound checks/append penalty. + bufs := d.buffer() + buf := &bufs[0] + var off uint8 + + for br.off >= 8 { + br.fillFast() + v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+0] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+1] = uint8(v.entry >> 8) + + // Refill + br.fillFast() + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+2] = uint8(v.entry >> 8) + + v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] + br.advance(uint8(v.entry)) + buf[off+3] = uint8(v.entry >> 8) + + off += 4 + if off == 0 { + if len(dst)+256 > maxDecodedSize { + br.close() + d.bufs.Put(bufs) + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:]...) + } + } + + if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + dst = append(dst, buf[:off]...) + + // br < 8, so uint8 is fine + bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead + for bitsLeft > 0 { + br.fill() + if false && br.bitsRead >= 32 { + if br.off >= 4 { + v := br.in[br.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + br.value = (br.value << 32) | uint64(low) + br.bitsRead -= 32 + br.off -= 4 + } else { + for br.off > 0 { + br.value = (br.value << 8) | uint64(br.in[br.off-1]) + br.bitsRead -= 8 + br.off-- + } + } + } + if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) + br.close() + return nil, ErrMaxDecodedSizeExceeded + } + v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] + nBits := uint8(v.entry) + br.advance(nBits) + bitsLeft -= nBits + dst = append(dst, uint8(v.entry>>8)) + } + d.bufs.Put(bufs) + return dst, br.close() +} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go new file mode 100644 index 00000000..e8ad17ad --- /dev/null +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -0,0 +1,337 @@ +// Package huff0 provides fast huffman encoding as used in zstd. +// +// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. 
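Before the package source, one convention worth calling out: every decode-table entry in the loops above is a uint16 that packs the code length in the low byte and the decoded symbol in the high byte, which is why the decoders read `uint8(v.entry)` for the advance and `uint8(v.entry >> 8)` for the output byte. A minimal pack/unpack sketch (hypothetical helpers, a fragment):

// packEntry/unpackEntry show the uint16 layout used by dEntrySingle-style
// tables: low byte = code length in bits, high byte = symbol.
func packEntry(nBits, symbol uint8) uint16 { return uint16(nBits) | uint16(symbol)<<8 }

func unpackEntry(e uint16) (nBits, symbol uint8) { return uint8(e), uint8(e >> 8) }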
+package huff0
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"math/bits"
+	"sync"
+
+	"github.com/klauspost/compress/fse"
+)
+
+const (
+	maxSymbolValue = 255
+
+	// zstandard limits tablelog to 11, see:
+	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description
+	tableLogMax     = 11
+	tableLogDefault = 11
+	minTablelog     = 5
+	huffNodesLen    = 512
+
+	// BlockSizeMax is the maximum input size for a single block, uncompressed.
+	BlockSizeMax = 1<<18 - 1
+)
+
+var (
+	// ErrIncompressible is returned when input is judged to be too hard to compress.
+	ErrIncompressible = errors.New("input is not compressible")
+
+	// ErrUseRLE is returned from the compressor when the input is a single byte value repeated.
+	ErrUseRLE = errors.New("input is single value repeated")
+
+	// ErrTooBig is returned if input is too large for a single block.
+	ErrTooBig = errors.New("input too big")
+
+	// ErrMaxDecodedSizeExceeded is returned if the decoded output exceeds the configured maximum size.
+	ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded")
+)
+
+// ReusePolicy controls whether compression tables are re-used between blocks.
+type ReusePolicy uint8
+
+const (
+	// ReusePolicyAllow will allow reuse if it produces smaller output.
+	ReusePolicyAllow ReusePolicy = iota
+
+	// ReusePolicyPrefer will re-use aggressively if possible.
+	// This will not check if a new table will produce smaller output,
+	// except if the current table is impossible to use or
+	// compressed output is bigger than input.
+	ReusePolicyPrefer
+
+	// ReusePolicyNone will disable re-use of tables.
+	// This is slightly faster than ReusePolicyAllow but may produce larger output.
+	ReusePolicyNone
+
+	// ReusePolicyMust must allow reuse and produce smaller output.
+	ReusePolicyMust
+)
+
+// Scratch holds the state and reusable buffers for compression and decompression.
+type Scratch struct {
+	count [maxSymbolValue + 1]uint32
+
+	// Per block parameters.
+	// These can be used to override compression parameters of the block.
+	// Do not touch, unless you know what you are doing.
+
+	// Out is the output buffer.
+	// If the scratch is re-used before the caller is done processing the output,
+	// set this field to nil.
+	// Otherwise the output buffer will be re-used for the next compression/decompression step
+	// and allocation will be avoided.
+	Out []byte
+
+	// OutTable will contain the table data only, if a new table has been generated.
+	// Slice of the returned data.
+	OutTable []byte
+
+	// OutData will contain the compressed data.
+	// Slice of the returned data.
+	OutData []byte
+
+	// MaxDecodedSize will set the maximum allowed output size.
+	// This value will automatically be set to BlockSizeMax if not set.
+	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
+	MaxDecodedSize int
+
+	br byteReader
+
+	// MaxSymbolValue will override the maximum symbol value of the next block.
+	MaxSymbolValue uint8
+
+	// TableLog will attempt to override the tablelog for the next block.
+	// Must be <= 11 and >= 5.
+	TableLog uint8
+
+	// Reuse will specify the reuse policy.
+	Reuse ReusePolicy
+
+	// WantLogLess allows specifying a log2 reduction that should at least be achieved,
+	// otherwise the block will be returned as incompressible.
+	// The reduction should then at least be (input size >> WantLogLess).
+	// If WantLogLess == 0 any improvement will do.
+	WantLogLess uint8
+
+	symbolLen      uint16 // Length of active part of the symbol table.
+	maxCount       int    // count of the most probable symbol
+	clearCount     bool   // clear count
+	actualTableLog uint8  // Selected tablelog.
+ prevTableLog uint8 // Tablelog for previous table + prevTable cTable // Table used for previous compression. + cTable cTable // compression table + dt dTable // decompression table + nodes []nodeElt + tmpOut [4][]byte + fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. + huffWeight [maxSymbolValue + 1]byte +} + +// TransferCTable will transfer the previously used compression table. +func (s *Scratch) TransferCTable(src *Scratch) { + if cap(s.prevTable) < len(src.prevTable) { + s.prevTable = make(cTable, 0, maxSymbolValue+1) + } + s.prevTable = s.prevTable[:len(src.prevTable)] + copy(s.prevTable, src.prevTable) + s.prevTableLog = src.prevTableLog +} + +func (s *Scratch) prepare(in []byte) (*Scratch, error) { + if len(in) > BlockSizeMax { + return nil, ErrTooBig + } + if s == nil { + s = &Scratch{} + } + if s.MaxSymbolValue == 0 { + s.MaxSymbolValue = maxSymbolValue + } + if s.TableLog == 0 { + s.TableLog = tableLogDefault + } + if s.TableLog > tableLogMax || s.TableLog < minTablelog { + return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) + } + if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { + s.MaxDecodedSize = BlockSizeMax + } + if s.clearCount && s.maxCount == 0 { + for i := range s.count { + s.count[i] = 0 + } + s.clearCount = false + } + if cap(s.Out) == 0 { + s.Out = make([]byte, 0, len(in)) + } + s.Out = s.Out[:0] + + s.OutTable = nil + s.OutData = nil + if cap(s.nodes) < huffNodesLen+1 { + s.nodes = make([]nodeElt, 0, huffNodesLen+1) + } + s.nodes = s.nodes[:0] + if s.fse == nil { + s.fse = &fse.Scratch{} + } + s.br.init(in) + + return s, nil +} + +type cTable []cTableEntry + +func (c cTable) write(s *Scratch) error { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + s.Out = append(s.Out, uint8(len(b))) + s.Out = append(s.Out, b...) + return nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return ErrIncompressible + } + op := s.Out + // special case, pack weights 4 bits/weight. 
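The fallback introduced by the comment above (its code continues below) stores the weights directly, two 4-bit values per byte, after a header byte of 128|(count-1). A standalone sketch of that packing, assuming the slice already excludes the implicit last weight (hypothetical helper, a fragment):

// packWeights mirrors the raw 4-bit weight encoding below: header byte
// 128|(len(w)-1), then two weights per byte, zero-padded if len(w) is odd.
func packWeights(w []uint8) []byte {
	out := []byte{128 | uint8(len(w)-1)}
	if len(w)%2 == 1 {
		w = append(w, 0) // same role as huffWeight[maxSymbolValue] = 0 below
	}
	for i := 0; i < len(w); i += 2 {
		out = append(out, w[i]<<4|w[i+1])
	}
	return out
}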
+ op = append(op, 128|(maxSymbolValue-1)) + // be sure it doesn't cause msan issue in final combination + huffWeight[maxSymbolValue] = 0 + for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { + op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) + } + s.Out = op + return nil +} + +func (c cTable) estTableSize(s *Scratch) (sz int, err error) { + var ( + // precomputed conversion table + bitsToWeight [tableLogMax + 1]byte + huffLog = s.actualTableLog + // last weight is not saved. + maxSymbolValue = uint8(s.symbolLen - 1) + huffWeight = s.huffWeight[:256] + ) + const ( + maxFSETableLog = 6 + ) + // convert to weight + bitsToWeight[0] = 0 + for n := uint8(1); n < huffLog+1; n++ { + bitsToWeight[n] = huffLog + 1 - n + } + + // Acquire histogram for FSE. + hist := s.fse.Histogram() + hist = hist[:256] + for i := range hist[:16] { + hist[i] = 0 + } + for n := uint8(0); n < maxSymbolValue; n++ { + v := bitsToWeight[c[n].nBits] & 15 + huffWeight[n] = v + hist[v]++ + } + + // FSE compress if feasible. + if maxSymbolValue >= 2 { + huffMaxCnt := uint32(0) + huffMax := uint8(0) + for i, v := range hist[:16] { + if v == 0 { + continue + } + huffMax = byte(i) + if v > huffMaxCnt { + huffMaxCnt = v + } + } + s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) + s.fse.TableLog = maxFSETableLog + b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) + if err == nil && len(b) < int(s.symbolLen>>1) { + sz += 1 + len(b) + return sz, nil + } + // Unable to compress (RLE/uncompressible) + } + // write raw values as 4-bits (max : 15) + if maxSymbolValue > (256 - 128) { + // should not happen : likely means source cannot be compressed + return 0, ErrIncompressible + } + // special case, pack weights 4 bits/weight. + sz += 1 + int(maxSymbolValue/2) + return sz, nil +} + +// estimateSize returns the estimated size in bytes of the input represented in the +// histogram supplied. +func (c cTable) estimateSize(hist []uint32) int { + nbBits := uint32(7) + for i, v := range c[:len(hist)] { + nbBits += uint32(v.nBits) * hist[i] + } + return int(nbBits >> 3) +} + +// minSize returns the minimum possible size considering the shannon limit. +func (s *Scratch) minSize(total int) int { + nbBits := float64(7) + fTotal := float64(total) + for _, v := range s.count[:s.symbolLen] { + n := float64(v) + if n > 0 { + nbBits += math.Log2(fTotal/n) * n + } + } + return int(nbBits) >> 3 +} + +func highBit32(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go new file mode 100644 index 00000000..3954c512 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go @@ -0,0 +1,34 @@ +// Package cpuinfo gives runtime info about the current CPU. +// +// This is a very limited module meant for use internally +// in this project. For more versatile solution check +// https://github.com/klauspost/cpuid. +package cpuinfo + +// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. +func HasBMI1() bool { + return hasBMI1 +} + +// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. +func HasBMI2() bool { + return hasBMI2 +} + +// DisableBMI2 will disable BMI2, for testing purposes. +// Call returned function to restore previous state. +func DisableBMI2() func() { + old := hasBMI2 + hasBMI2 = false + return func() { + hasBMI2 = old + } +} + +// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. 
+func HasBMI() bool { + return HasBMI1() && HasBMI2() +} + +var hasBMI1 bool +var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go new file mode 100644 index 00000000..e802579c --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go @@ -0,0 +1,11 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package cpuinfo + +// go:noescape +func x86extensions() (bmi1, bmi2 bool) + +func init() { + hasBMI1, hasBMI2 = x86extensions() +} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s new file mode 100644 index 00000000..4465fbe9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s @@ -0,0 +1,36 @@ +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" +#include "funcdata.h" +#include "go_asm.h" + +TEXT ·x86extensions(SB), NOSPLIT, $0 + // 1. determine max EAX value + XORQ AX, AX + CPUID + + CMPQ AX, $7 + JB unsupported + + // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" + MOVQ $7, AX + MOVQ $0, CX + CPUID + + BTQ $3, BX // bit 3 = BMI1 + SETCS AL + + BTQ $8, BX // bit 8 = BMI2 + SETCS AH + + MOVB AL, bmi1+0(FP) + MOVB AH, bmi2+1(FP) + RET + +unsupported: + XORQ AX, AX + MOVB AL, bmi1+0(FP) + MOVB AL, bmi2+1(FP) + RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/LICENSE b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE new file mode 100644 index 00000000..6050c10f --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
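Returning to the cpuinfo package above: its intended use is runtime dispatch, picking an implementation per call based on detected CPU features, exactly as decompress_amd64.go does for the 1X loop. A sketch of that dispatch (decode1x is a hypothetical wrapper; the two loop functions and cpuinfo.HasBMI2 appear earlier in this diff):

// decode1x picks the BMI2 variant (SHLX/SHRX shifts) when the CPU
// supports it, and otherwise falls back to the plain amd64 loop.
func decode1x(ctx *decompress1xContext) {
	if cpuinfo.HasBMI2() {
		decompress1x_main_loop_bmi2(ctx)
		return
	}
	decompress1x_main_loop_amd64(ctx)
}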
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go new file mode 100644 index 00000000..40796a49 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -0,0 +1,264 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Decode handles the Snappy block format, not the Snappy stream format. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +// +// Reader handles the Snappy stream format, not the Snappy block format. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +func (r *Reader) fill() error { + for r.i >= r.j { + if !r.readFull(r.buf[:4], true) { + return r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.decoded[:n], false) { + return r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return r.err + } + } + + return nil +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil +} + +// ReadByte satisfies the io.ByteReader interface. 
+func (r *Reader) ReadByte() (byte, error) { + if r.err != nil { + return 0, r.err + } + + if err := r.fill(); err != nil { + return 0, err + } + + c := r.decoded[r.i] + r.i++ + return c, nil +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go new file mode 100644 index 00000000..77395a6b --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go @@ -0,0 +1,113 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. + // If no overlap, use the built-in copy: + if offset >= length { + copy(dst[d:d+length], dst[d-offset:]) + d += length + continue + } + + // Unlike the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. 
Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + // + // We align the slices into a and b and show the compiler they are the same size. + // This allows the loop to run without bounds checks. + a := dst[d : d+length] + b := dst[d-offset:] + b = b[:len(a)] + for i := range a { + a[i] = b[i] + } + d += length + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go new file mode 100644 index 00000000..13c6040a --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snapref + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +// +// Encode handles the Snappy block format, not the Snappy stream format. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. 
This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +// +// Writer handles the Snappy stream format, not the Snappy block format. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. 
+func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. 
+func (w *Writer) Close() error {
+	w.Flush()
+	ret := w.err
+	if w.err == nil {
+		w.err = errClosed
+	}
+	return ret
+}
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
new file mode 100644
index 00000000..298c4f8e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -0,0 +1,240 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snapref
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+// +// It assumes that: +// +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. 
Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go new file mode 100644 index 00000000..34d01f4a --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snapref implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snapref + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. 
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod new file mode 100644 index 00000000..2263853f --- /dev/null +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -0,0 +1,4 @@ +module github.com/klauspost/compress + +go 1.16 + diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum new file mode 100644 index 00000000..e69de29b diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md new file mode 100644 index 00000000..65b38abe --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -0,0 +1,441 @@ +# zstd + +[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. +It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. +A high performance compression algorithm is implemented. For now focused on speed. 
+
+This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
+
+This package is pure Go and without use of "unsafe".
+
+The `zstd` package is provided as open source software using a Go standard license.
+
+Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
+## Installation
+
+Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd)
+
+## Compressor
+
+### Status:
+
+STABLE - there may always be subtle bugs; a wide variety of content has been tested, and the library is actively
+used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
+
+There may still be specific combinations of data types/size/settings that could lead to edge cases,
+so as always, testing is recommended.
+
+Several pre-defined compression levels have been implemented:
+
+* The "Fastest" compression ratio is roughly equivalent to zstd level 1.
+* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
+* The "Better" compression ratio is roughly equivalent to zstd level 7.
+* The "Best" compression ratio is roughly equivalent to zstd level 11.
+
+In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
+At a compression ratio comparable to stdlib level 3, it is usually around 3x as fast.
+
+### Usage
+
+An Encoder can be used either for compressing a stream, via the `io.WriteCloser`
+interface it supports, or for multiple independent tasks, via the `EncodeAll` function.
+For smaller encodes, the `EncodeAll` function is encouraged.
+Use `NewWriter` to create a new instance that can be used for both.
+
+To create a writer with default options, do the following:
+
+```Go
+// Compress input to output.
+func Compress(in io.Reader, out io.Writer) error {
+	enc, err := zstd.NewWriter(out)
+	if err != nil {
+		return err
+	}
+	_, err = io.Copy(enc, in)
+	if err != nil {
+		enc.Close()
+		return err
+	}
+	return enc.Close()
+}
+```

+Now you can encode by writing data to `enc`. The output will be finished writing when `Close()` is called.
+Even if your encode fails, you should still call `Close()` to release any resources that may be held up.
+
+The above is fine for big encodes. However, whenever possible, try to *reuse* the writer.
+
+To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
+This will allow the encoder to reuse all resources and avoid wasteful allocations.
+
+Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on part
+of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
+in the future. So if you want to limit concurrency for future updates, specify the concurrency
+you would like.
+
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`,
+which will compress input as each block is completed, blocking on writes until each has completed.
+
+You can specify your desired compression level using the `WithEncoderLevel()` option.
Currently only pre-defined
+compression settings can be specified.
+
+#### Future Compatibility Guarantees
+
+This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
+
+The goal will be to keep the default efficiency at the default zstd (level 3).
+However, the encoding should never be assumed to remain the same,
+and you should not use hashes of compressed output for similarity checks.
+
+The Encoder can be assumed to produce the same output from the exact same code version.
+However, there may be modes in the future that break this,
+although they will not be enabled without an explicit option.
+
+This encoder is not designed to (and will probably never) output the exact same bitstream as the reference encoder.
+
+Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
+[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
+and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
+
+#### Blocks
+
+For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
+
+`EncodeAll` will encode all input in src and append it to dst.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
+
+Encoded blocks can be concatenated and the result will be the combined input stream.
+Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
+
+Especially when encoding blocks you should take special care to reuse the encoder.
+This will effectively make it run without allocations after a warmup period.
+To make it run completely without allocations, supply a destination buffer with space for all content.
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a writer that caches compressors.
+// For this operation type we supply a nil Writer.
+var encoder, _ = zstd.NewWriter(nil)
+
+// Compress a buffer.
+// If you have a destination buffer, the allocation in the call can also be eliminated.
+func Compress(src []byte) []byte {
+	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
+}
+```
+
+You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
+option when creating the writer.
+
+Using the Encoder for both a stream and individual blocks concurrently is safe.
+
+### Performance
+
+I have collected some speed examples to compare speed and compression against other compressors.
+
+* `file` is the input file.
+* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
+* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better", and level 4 is "best" (see the sketch below).
+* `insize`/`outsize` is the input/output size.
+* `millis` is the number of milliseconds used for compression.
+* `mb/s` is megabytes (2^20 bytes) per second.
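+
+As a quick cross-reference for the tables that follow, the `zskp` levels map onto the encoder levels
+selectable with `WithEncoderLevel`; an illustrative sketch (the helper function is made up for this
+example, the `zstd` identifiers are the package's own):
+
+```Go
+import (
+	"io"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// newZskpWriter maps the zskp benchmark levels (1-4) to encoder options.
+func newZskpWriter(out io.Writer, level int) (*zstd.Encoder, error) {
+	m := map[int]zstd.EncoderLevel{
+		1: zstd.SpeedFastest,           // "fastest"
+		2: zstd.SpeedDefault,           // "default"
+		3: zstd.SpeedBetterCompression, // "better"
+		4: zstd.SpeedBestCompression,   // "best"
+	}
+	return zstd.NewWriter(out, zstd.WithEncoderLevel(m[level]))
+}
+```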
+ +``` +Silesia Corpus: +http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip + +This package: +file out level insize outsize millis mb/s +silesia.tar zskp 1 211947520 73821326 634 318.47 +silesia.tar zskp 2 211947520 67655404 1508 133.96 +silesia.tar zskp 3 211947520 64746933 3000 67.37 +silesia.tar zskp 4 211947520 60073508 16926 11.94 + +cgo zstd: +silesia.tar zstd 1 211947520 73605392 543 371.56 +silesia.tar zstd 3 211947520 66793289 864 233.68 +silesia.tar zstd 6 211947520 62916450 1913 105.66 +silesia.tar zstd 9 211947520 60212393 5063 39.92 + +gzip, stdlib/this package: +silesia.tar gzstd 1 211947520 80007735 1498 134.87 +silesia.tar gzkp 1 211947520 80088272 1009 200.31 + +GOB stream of binary data. Highly compressible. +https://files.klauspost.com/compress/gob-stream.7z + +file out level insize outsize millis mb/s +gob-stream zskp 1 1911399616 233948096 3230 564.34 +gob-stream zskp 2 1911399616 203997694 4997 364.73 +gob-stream zskp 3 1911399616 173526523 13435 135.68 +gob-stream zskp 4 1911399616 162195235 47559 38.33 + +gob-stream zstd 1 1911399616 249810424 2637 691.26 +gob-stream zstd 3 1911399616 208192146 3490 522.31 +gob-stream zstd 6 1911399616 193632038 6687 272.56 +gob-stream zstd 9 1911399616 177620386 16175 112.70 + +gob-stream gzstd 1 1911399616 357382013 9046 201.49 +gob-stream gzkp 1 1911399616 359136669 4885 373.08 + +The test data for the Large Text Compression Benchmark is the first +10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. +http://mattmahoney.net/dc/textdata.html + +file out level insize outsize millis mb/s +enwik9 zskp 1 1000000000 343833605 3687 258.64 +enwik9 zskp 2 1000000000 317001237 7672 124.29 +enwik9 zskp 3 1000000000 291915823 15923 59.89 +enwik9 zskp 4 1000000000 261710291 77697 12.27 + +enwik9 zstd 1 1000000000 358072021 3110 306.65 +enwik9 zstd 3 1000000000 313734672 4784 199.35 +enwik9 zstd 6 1000000000 295138875 10290 92.68 +enwik9 zstd 9 1000000000 278348700 28549 33.40 + +enwik9 gzstd 1 1000000000 382578136 8608 110.78 +enwik9 gzkp 1 1000000000 382781160 5628 169.45 + +Highly compressible JSON file. 
+https://files.klauspost.com/compress/github-june-2days-2019.json.zst
+
+file                        out   level  insize      outsize     millis  mb/s
+github-june-2days-2019.json zskp  1      6273951764  697439532   9789    611.17
+github-june-2days-2019.json zskp  2      6273951764  610876538   18553   322.49
+github-june-2days-2019.json zskp  3      6273951764  517662858   44186   135.41
+github-june-2days-2019.json zskp  4      6273951764  464617114   165373  36.18
+
+github-june-2days-2019.json zstd  1      6273951764  766284037   8450    708.00
+github-june-2days-2019.json zstd  3      6273951764  661889476   10927   547.57
+github-june-2days-2019.json zstd  6      6273951764  642756859   22996   260.18
+github-june-2days-2019.json zstd  9      6273951764  601974523   52413   114.16
+
+github-june-2days-2019.json gzstd 1      6273951764  1164397768  26793   223.32
+github-june-2days-2019.json gzkp  1      6273951764  1120631856  17693   338.16
+
+VM Image, Linux mint with a few installed applications:
+https://files.klauspost.com/compress/rawstudio-mint14.7z
+
+file                 out   level  insize      outsize     millis  mb/s
+rawstudio-mint14.tar zskp  1      8558382592  3718400221  18206   448.29
+rawstudio-mint14.tar zskp  2      8558382592  3326118337  37074   220.15
+rawstudio-mint14.tar zskp  3      8558382592  3163842361  87306   93.49
+rawstudio-mint14.tar zskp  4      8558382592  2970480650  783862  10.41
+
+rawstudio-mint14.tar zstd  1      8558382592  3609250104  17136   476.27
+rawstudio-mint14.tar zstd  3      8558382592  3341679997  29262   278.92
+rawstudio-mint14.tar zstd  6      8558382592  3235846406  77904   104.77
+rawstudio-mint14.tar zstd  9      8558382592  3160778861  140946  57.91
+
+rawstudio-mint14.tar gzstd 1      8558382592  3926234992  51345   158.96
+rawstudio-mint14.tar gzkp  1      8558382592  3960117298  36722   222.26
+
+CSV data:
+https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
+
+file                  out   level  insize      outsize    millis  mb/s
+nyc-taxi-data-10M.csv zskp  1      3325605752  641319332  9462    335.17
+nyc-taxi-data-10M.csv zskp  2      3325605752  588976126  17570   180.50
+nyc-taxi-data-10M.csv zskp  3      3325605752  529329260  32432   97.79
+nyc-taxi-data-10M.csv zskp  4      3325605752  474949772  138025  22.98
+
+nyc-taxi-data-10M.csv zstd  1      3325605752  687399637  8233    385.18
+nyc-taxi-data-10M.csv zstd  3      3325605752  598514411  10065   315.07
+nyc-taxi-data-10M.csv zstd  6      3325605752  570522953  20038   158.27
+nyc-taxi-data-10M.csv zstd  9      3325605752  517554797  64565   49.12
+
+nyc-taxi-data-10M.csv gzstd 1      3325605752  928654908  21270   149.11
+nyc-taxi-data-10M.csv gzkp  1      3325605752  922273214  13929   227.68
+```
+
+## Decompressor
+
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+
+This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
+kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
+The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
+or run it past its limits with ANY input provided.
+
+### Usage
+
+The package has been designed for two main use cases: big streams of data and smaller in-memory buffers.
+Both are accessed by creating a `Decoder`.
+
+For streaming use, a simple setup could look like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+func Decompress(in io.Reader, out io.Writer) error {
+	d, err := zstd.NewReader(in)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+
+	// Copy content...
+	_, err = io.Copy(out, d)
+	return err
+}
+```
+
+When running with default settings, it is important to use the "Close" function when you no longer
+need the Reader, so that its goroutines are stopped.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will
+decompress data only as it is being requested.
+
+For decoding buffers, it could look something like this:
+
+```Go
+import "github.com/klauspost/compress/zstd"
+
+// Create a reader that caches decompressors.
+// For this operation type we supply a nil Reader.
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+
+// Decompress a buffer. We don't supply a destination buffer,
+// so it will be allocated by the decoder.
+func Decompress(src []byte) ([]byte, error) {
+	return decoder.DecodeAll(src, nil)
+}
+```
+
+Both of these cases should provide the functionality needed.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default, 4 decompressors will be created.
+
+It will only allow a certain number of concurrent operations to run.
+To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
+
+### Dictionaries
+
+Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
+
+Dictionaries are added individually to Decoders.
+Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
+To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
+Several dictionaries can be added at once.
+
+The dictionary will be used automatically for the data that specifies it.
+A re-used Decoder will still contain the dictionaries registered.
+
+When registering multiple dictionaries with the same ID, the last one will be used.
+
+It is possible to use dictionaries when compressing data.
+
+To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used
+and it will likely be used even if it doesn't improve compression.
+
+The same dictionary must then be used to decompress the content.
+
+For any real gains, the dictionary should be built with similar data.
+If an unsuitable dictionary is used, the output may be slightly larger than using no dictionary.
+Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
+For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
+
+For now there is a fixed startup performance penalty for compressing content with dictionaries.
+This will likely be improved over time. Just be sure to test performance when implementing.
+
+### Allocation-less operation
+
+The decoder has been designed to operate without allocations after a warmup.
+
+This means that you should *store* the decoder for best performance.
+To re-use a stream decoder, use the `Reset(r io.Reader) error` method to switch to another stream.
+A decoder can safely be re-used even if the previous stream failed.
+
+To release the resources, you must call the `Close()` function on a decoder.
+After this it can *no longer be reused*, but all running goroutines will be stopped.
+So you *must* use this if you will no longer need the Reader.
+
+For decompressing smaller buffers, a single decoder can be used.
+When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
+In this case no unneeded allocations should be made.
+
+### Concurrency
+
+The buffer decoder does everything on the same goroutine and does nothing concurrently.
+It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
+
+The stream decoder will create goroutines that:
+
+1) Read input and split it into blocks.
+2) Decompress literals.
+3) Decompress sequences.
+4) Reconstruct the output stream.
+
+So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
+
+The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
+
+Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
+
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
+### Benchmarks
+
+The first two are streaming decodes and the last are smaller inputs.
+
+Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
+
+```
+BenchmarkDecoderSilesia-32    5  206878840 ns/op  1024.50 MB/s  49808 B/op  43 allocs/op
+BenchmarkDecoderEnwik9-32     1 1271809000 ns/op   786.28 MB/s  72048 B/op  52 allocs/op
+
+Concurrent blocks, performance:
+
+BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32         67356  17857 ns/op  10321.96 MB/s  22.48 pct   102 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32    266656   4421 ns/op  26823.21 MB/s  11.89 pct    19 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32      20992  56842 ns/op   8477.17 MB/s  39.90 pct   754 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32        27456  43932 ns/op   9714.01 MB/s  33.27 pct   524 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32      78432  15047 ns/op   8319.15 MB/s  40.34 pct    66 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32       65800  18436 ns/op   8249.63 MB/s  37.75 pct    88 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32         102993  11523 ns/op  35546.09 MB/s  3.637 pct   143 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32  1000000   1070 ns/op  95720.98 MB/s  80.53 pct     3 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32   749802   1752 ns/op  70272.35 MB/s  100.0 pct     5 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32          22640  52934 ns/op  13263.37 MB/s  26.25 pct  1014 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/html.zst-32             226412   5232 ns/op  19572.27 MB/s  14.49 pct    20 B/op  0 allocs/op
+BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32    923041   1276 ns/op   3194.71 MB/s  31.26 pct     0 B/op  0 allocs/op
+```
+
+This reflects the performance around May 2022, but this may be out of date.
+
+## Zstd inside ZIP files
+
+It is possible to use zstandard to compress individual files inside zip archives.
+While this isn't widely supported, it can be useful for internal files.
+
+To support the compression and decompression of these files, you must register a compressor and decompressor.
+
+It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT to
+use the global registration functions. The main reason for this is that two registrations from
+different packages will result in a panic.
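+
+A minimal per-archive registration could look like the sketch below (error handling trimmed; the
+`ZipMethodWinZip` constant and the `ZipCompressor`/`ZipDecompressor` helpers are exported by this package):
+
+```Go
+import (
+	"archive/zip"
+	"bytes"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// roundTrip registers zstd on an individual zip.Writer and zip.Reader,
+// writes one file and reads it back.
+func roundTrip(payload []byte) error {
+	var buf bytes.Buffer
+	zw := zip.NewWriter(&buf)
+	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
+	w, err := zw.CreateHeader(&zip.FileHeader{Name: "file.bin", Method: zstd.ZipMethodWinZip})
+	if err != nil {
+		return err
+	}
+	if _, err = w.Write(payload); err != nil {
+		return err
+	}
+	if err = zw.Close(); err != nil {
+		return err
+	}
+
+	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+	if err != nil {
+		return err
+	}
+	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
+	rc, err := zr.File[0].Open()
+	if err != nil {
+		return err
+	}
+	return rc.Close()
+}
+```
+
+Because the registration lives on the individual `zip.Writer`/`zip.Reader`, two packages doing this
+independently cannot collide the way two global registrations would.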
+ +It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip +files concurrently, and using a single instance will allow reusing some resources. + +See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for +how to compress and decompress files inside zip archives. + +# Contributions + +Contributions are always welcome. +For new features/fixes, remember to add tests and for performance enhancements include benchmarks. + +For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). + +This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go new file mode 100644 index 00000000..97299d49 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -0,0 +1,140 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math/bits" +) + +// bitReader reads a bitstream in reverse. +// The last set bit indicates the start of the stream and is used +// for aligning the input. +type bitReader struct { + in []byte + off uint // next byte to read is at in[off - 1] + value uint64 // Maybe use [16]byte, but shifting is awkward. + bitsRead uint8 +} + +// init initializes and resets the bit reader. +func (b *bitReader) init(in []byte) error { + if len(in) < 1 { + return errors.New("corrupt stream: too short") + } + b.in = in + b.off = uint(len(in)) + // The highest bit of the last byte indicates where to start + v := in[len(in)-1] + if v == 0 { + return errors.New("corrupt stream, did not find end of stream") + } + b.bitsRead = 64 + b.value = 0 + if len(in) >= 8 { + b.fillFastStart() + } else { + b.fill() + b.fill() + } + b.bitsRead += 8 - uint8(highBits(uint32(v))) + return nil +} + +// getBits will return n bits. n can be 0. +func (b *bitReader) getBits(n uint8) int { + if n == 0 /*|| b.bitsRead >= 64 */ { + return 0 + } + return int(b.get32BitsFast(n)) +} + +// get32BitsFast requires that at least one bit is requested every time. +// There are no checks if the buffer is filled. +func (b *bitReader) get32BitsFast(n uint8) uint32 { + const regMask = 64 - 1 + v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) + b.bitsRead += n + return v +} + +// fillFast() will make sure at least 32 bits are available. +// There must be at least 4 bytes available. +func (b *bitReader) fillFast() { + if b.bitsRead < 32 { + return + } + // 2 bounds checks. + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 +} + +// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. +func (b *bitReader) fillFastStart() { + // Do single re-slice to avoid bounds checks. + b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.bitsRead = 0 + b.off -= 8 +} + +// fill() will make sure at least 32 bits are available. 
+func (b *bitReader) fill() { + if b.bitsRead < 32 { + return + } + if b.off >= 4 { + v := b.in[b.off-4:] + v = v[:4] + low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + b.value = (b.value << 32) | uint64(low) + b.bitsRead -= 32 + b.off -= 4 + return + } + for b.off > 0 { + b.value = (b.value << 8) | uint64(b.in[b.off-1]) + b.bitsRead -= 8 + b.off-- + } +} + +// finished returns true if all bits have been read from the bit stream. +func (b *bitReader) finished() bool { + return b.off == 0 && b.bitsRead >= 64 +} + +// overread returns true if more bits have been requested than is on the stream. +func (b *bitReader) overread() bool { + return b.bitsRead > 64 +} + +// remain returns the number of bits remaining. +func (b *bitReader) remain() uint { + return b.off*8 + 64 - uint(b.bitsRead) +} + +// close the bitstream and returns an error if out-of-buffer reads occurred. +func (b *bitReader) close() error { + // Release reference. + b.in = nil + if !b.finished() { + return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) + } + if b.bitsRead > 64 { + return io.ErrUnexpectedEOF + } + return nil +} + +func highBits(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go new file mode 100644 index 00000000..78b3c61b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -0,0 +1,113 @@ +// Copyright 2018 Klaus Post. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. + +package zstd + +// bitWriter will write bits. +// First bit will be LSB of the first byte of output. +type bitWriter struct { + bitContainer uint64 + nBits uint8 + out []byte +} + +// bitMask16 is bitmasks. Has extra to avoid bounds check. +var bitMask16 = [32]uint16{ + 0, 1, 3, 7, 0xF, 0x1F, + 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, + 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, + 0xFFFF, 0xFFFF} /* up to 16 bits */ + +var bitMask32 = [32]uint32{ + 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, + 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, + 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, + 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, +} // up to 32 bits + +// addBits16NC will add up to 16 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16NC(value uint16, bits uint8) { + b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits32NC will add up to 31 bits. +// It will not check if there is space for them, +// so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits32NC(value uint32, bits uint8) { + b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) + b.nBits += bits +} + +// addBits64NC will add up to 64 bits. +// There must be space for 32 bits. +func (b *bitWriter) addBits64NC(value uint64, bits uint8) { + if bits <= 31 { + b.addBits32Clean(uint32(value), bits) + return + } + b.addBits32Clean(uint32(value), 32) + b.flush32() + b.addBits32Clean(uint32(value>>32), bits-32) +} + +// addBits32Clean will add up to 32 bits. 
+// It will not check if there is space for them. +// The input must not contain more bits than specified. +func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. +// It will not check if there is space for them, so the caller must ensure that it has flushed recently. +func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { + b.bitContainer |= uint64(value) << (b.nBits & 63) + b.nBits += bits +} + +// flush32 will flush out, so there are at least 32 bits available for writing. +func (b *bitWriter) flush32() { + if b.nBits < 32 { + return + } + b.out = append(b.out, + byte(b.bitContainer), + byte(b.bitContainer>>8), + byte(b.bitContainer>>16), + byte(b.bitContainer>>24)) + b.nBits -= 32 + b.bitContainer >>= 32 +} + +// flushAlign will flush remaining full bytes and align to next byte boundary. +func (b *bitWriter) flushAlign() { + nbBytes := (b.nBits + 7) >> 3 + for i := uint8(0); i < nbBytes; i++ { + b.out = append(b.out, byte(b.bitContainer>>(i*8))) + } + b.nBits = 0 + b.bitContainer = 0 +} + +// close will write the alignment bit and write the final byte(s) +// to the output. +func (b *bitWriter) close() error { + // End mark + b.addBits16Clean(1, 1) + // flush until next byte. + b.flushAlign() + return nil +} + +// reset and continue writing by appending to out. +func (b *bitWriter) reset(out []byte) { + b.bitContainer = 0 + b.nBits = 0 + b.out = out +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go new file mode 100644 index 00000000..f52d1aed --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -0,0 +1,720 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" + + "github.com/klauspost/compress/huff0" + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type blockType uint8 + +//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex + +const ( + blockTypeRaw blockType = iota + blockTypeRLE + blockTypeCompressed + blockTypeReserved +) + +type literalsBlockType uint8 + +const ( + literalsBlockRaw literalsBlockType = iota + literalsBlockRLE + literalsBlockCompressed + literalsBlockTreeless +) + +const ( + // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) + maxCompressedBlockSize = 128 << 10 + + compressedBlockOverAlloc = 16 + maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc + + // Maximum possible block size (all Raw+Uncompressed). + maxBlockSize = (1 << 21) - 1 + + maxMatchLen = 131074 + maxSequences = 0x7f00 + 0xffff + + // We support slightly less than the reference decoder to be able to + // use ints on 32 bit archs. + maxOffsetBits = 30 +) + +var ( + huffDecoderPool = sync.Pool{New: func() interface{} { + return &huff0.Scratch{} + }} + + fseDecoderPool = sync.Pool{New: func() interface{} { + return &fseDecoder{} + }} +) + +type blockDec struct { + // Raw source data of the block. + data []byte + dataStorage []byte + + // Destination of the decoded data. + dst []byte + + // Buffer for literals data. + literalBuf []byte + + // Window size of the block. 
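+ // Used to validate block and literal sizes during decoding.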
+ WindowSize uint64
+
+ err error
+
+ // Check against this crc
+ checkCRC []byte
+
+ // Frame to use for singlethreaded decoding.
+ // Should not be used by the decoder itself since parent may be another frame.
+ localFrame *frameDec
+
+ sequence []seqVals
+
+ async struct {
+ newHist *history
+ literals []byte
+ seqData []byte
+ seqSize int // Size of uncompressed sequences
+ fcs uint64
+ }
+
+ // Block is RLE, this is the size.
+ RLESize uint32
+
+ Type blockType
+
+ // Is this the last block of a frame?
+ Last bool
+
+ // Use less memory
+ lowMem bool
+}
+
+func (b *blockDec) String() string {
+ if b == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Stream Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize)
+}
+
+func newBlockDec(lowMem bool) *blockDec {
+ b := blockDec{
+ lowMem: lowMem,
+ }
+ return &b
+}
+
+// reset will reset the block.
+// Input must be a start of a block and will be at the end of the block when returned.
+func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
+ b.WindowSize = windowSize
+ tmp, err := br.readSmall(3)
+ if err != nil {
+ println("Reading block header:", err)
+ return err
+ }
+ bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
+ b.Last = bh&1 != 0
+ b.Type = blockType((bh >> 1) & 3)
+ // find size.
+ cSize := int(bh >> 3)
+ maxSize := maxCompressedBlockSizeAlloc
+ switch b.Type {
+ case blockTypeReserved:
+ return ErrReservedBlockType
+ case blockTypeRLE:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
+ b.RLESize = uint32(cSize)
+ if b.lowMem {
+ maxSize = cSize
+ }
+ cSize = 1
+ case blockTypeCompressed:
+ if debugDecoder {
+ println("Data size on stream:", cSize)
+ }
+ b.RLESize = 0
+ maxSize = maxCompressedBlockSizeAlloc
+ if windowSize < maxCompressedBlockSize && b.lowMem {
+ maxSize = int(windowSize) + compressedBlockOverAlloc
+ }
+ if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
+ if debugDecoder {
+ printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrCompressedSizeTooBig
+ }
+ // Empty compressed blocks must at least be 2 bytes
+ // for Literals_Block_Type and one for Sequences_Section_Header.
+ if cSize < 2 {
+ return ErrBlockTooSmall
+ }
+ case blockTypeRaw:
+ if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+ if debugDecoder {
+ printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+ }
+ return ErrWindowSizeExceeded
+ }
+
+ b.RLESize = 0
+ // We do not need a destination for raw blocks.
+ maxSize = -1
+ default:
+ panic("Invalid block type")
+ }
+
+ // Read block data.
+ if cap(b.dataStorage) < cSize {
+ if b.lowMem || cSize > maxCompressedBlockSize {
+ b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
+ } else {
+ b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
+ }
+ }
+ if cap(b.dst) <= maxSize {
+ b.dst = make([]byte, 0, maxSize+1)
+ }
+ b.data, err = br.readBig(cSize, b.dataStorage)
+ if err != nil {
+ if debugDecoder {
+ println("Reading block:", err, "(", cSize, ")", len(b.data))
+ printf("%T", br)
+ }
+ return err
+ }
+ return nil
+}
+
+// sendErr will make the decoder return err on this frame.
+func (b *blockDec) sendErr(err error) {
+ b.Last = true
+ b.Type = blockTypeReserved
+ b.err = err
+}
+
+// Close will release resources.
+// Closed blockDec cannot be reset.
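+// The current implementation holds no resources, so Close is a no-op.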
+func (b *blockDec) Close() { +} + +// decodeBuf +func (b *blockDec) decodeBuf(hist *history) error { + switch b.Type { + case blockTypeRLE: + if cap(b.dst) < int(b.RLESize) { + if b.lowMem { + b.dst = make([]byte, b.RLESize) + } else { + b.dst = make([]byte, maxBlockSize) + } + } + b.dst = b.dst[:b.RLESize] + v := b.data[0] + for i := range b.dst { + b.dst[i] = v + } + hist.appendKeep(b.dst) + return nil + case blockTypeRaw: + hist.appendKeep(b.data) + return nil + case blockTypeCompressed: + saved := b.dst + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } + err := b.decodeCompressed(hist) + if debugDecoder { + println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) + } + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } + return err + case blockTypeReserved: + // Used for returning errors. + return b.err + default: + panic("Invalid block type") + } +} + +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { + // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header + if len(in) < 2 { + return in, ErrBlockTooSmall + } + + litType := literalsBlockType(in[0] & 3) + var litRegenSize int + var litCompSize int + sizeFormat := (in[0] >> 2) & 3 + var fourStreams bool + var literals []byte + switch litType { + case literalsBlockRaw, literalsBlockRLE: + switch sizeFormat { + case 0, 2: + // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. + litRegenSize = int(in[0] >> 3) + in = in[1:] + case 1: + // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + in = in[2:] + case 3: + // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. + if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) + in = in[3:] + } + case literalsBlockCompressed, literalsBlockTreeless: + switch sizeFormat { + case 0, 1: + // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
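+ // They are packed after the 4 header bits: regenerated size first, compressed size above it.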
+ if len(in) < 3 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + litRegenSize = int(n & 1023) + litCompSize = int(n >> 10) + fourStreams = sizeFormat == 1 + in = in[3:] + case 2: + fourStreams = true + if len(in) < 4 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + litRegenSize = int(n & 16383) + litCompSize = int(n >> 14) + in = in[4:] + case 3: + fourStreams = true + if len(in) < 5 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) + return in, ErrBlockTooSmall + } + n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) + litRegenSize = int(n & 262143) + litCompSize = int(n >> 18) + in = in[5:] + } + } + if debugDecoder { + println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) + } + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + + switch litType { + case literalsBlockRaw: + if len(in) < litRegenSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) + return in, ErrBlockTooSmall + } + literals = in[:litRegenSize] + in = in[litRegenSize:] + //printf("Found %d uncompressed literals\n", litRegenSize) + case literalsBlockRLE: + if len(in) < 1 { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) + return in, ErrBlockTooSmall + } + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + literals = b.literalBuf[:litRegenSize] + v := in[0] + for i := range literals { + literals[i] = v + } + in = in[1:] + if debugDecoder { + printf("Found %d RLE compressed literals\n", litRegenSize) + } + case literalsBlockTreeless: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + // Store compressed literals, so we defer decoding until we get history. + literals = in[:litCompSize] + in = in[litCompSize:] + if debugDecoder { + printf("Found %d compressed literals\n", litCompSize) + } + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") + } + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + var err error + // Use our out buffer. 
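+ // Cap the Huffman decoder output at the expected regenerated size.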
+ huff.MaxDecodedSize = litRegenSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error + huff, literals, err = huff0.ReadTable(literals, huff) + if err != nil { + println("reading huffman table:", err) + return in, err + } + hist.huffTree = huff + huff.MaxDecodedSize = litRegenSize + // Use our out buffer. + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) + } + if err != nil { + println("decoding compressed literals:", err) + return in, err + } + // Make sure we don't leak our literals buffer + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + // Re-cap to get extra size. + literals = b.literalBuf[:len(literals)] + if debugDecoder { + printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) + } + } + hist.decoders.literals = literals + return in, nil +} + +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
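+ // No sequences, so the literals are the entire block output.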
+ return nil + } + before := len(hist.decoders.out) + err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) + if err != nil { + return err + } + if hist.decoders.maxSyncLen > 0 { + hist.decoders.maxSyncLen += uint64(before) + hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { + if debugDecoder { + printf("prepareSequences: %d byte(s) input\n", len(in)) + } + // Decode Sequences + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section + if len(in) < 1 { + return ErrBlockTooSmall + } + var nSeqs int + seqHeader := in[0] + switch { + case seqHeader < 128: + nSeqs = int(seqHeader) + in = in[1:] + case seqHeader < 255: + if len(in) < 2 { + return ErrBlockTooSmall + } + nSeqs = int(seqHeader-128)<<8 | int(in[1]) + in = in[2:] + case seqHeader == 255: + if len(in) < 3 { + return ErrBlockTooSmall + } + nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) + in = in[3:] + } + if nSeqs == 0 && len(in) != 0 { + // When no sequences, there should not be any more data... + if debugDecoder { + printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) + } + return ErrUnexpectedBlockSize + } + + var seqs = &hist.decoders + seqs.nSeqs = nSeqs + if nSeqs > 0 { + if len(in) < 1 { + return ErrBlockTooSmall + } + br := byteReader{b: in, off: 0} + compMode := br.Uint8() + br.advance(1) + if debugDecoder { + printf("Compression modes: 0b%b", compMode) + } + for i := uint(0); i < 3; i++ { + mode := seqCompMode((compMode >> (6 - i*2)) & 3) + if debugDecoder { + println("Table", tableIndex(i), "is", mode) + } + var seq *sequenceDec + switch tableIndex(i) { + case tableLiteralLengths: + seq = &seqs.litLengths + case tableOffsets: + seq = &seqs.offsets + case tableMatchLengths: + seq = &seqs.matchLengths + default: + panic("unknown table") + } + switch mode { + case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } + seq.fse = &fsePredef[i] + case compModeRLE: + if br.remain() < 1 { + return ErrBlockTooSmall + } + v := br.Uint8() + br.advance(1) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + symb, err := decSymbolValue(v, symbolTableX[i]) + if err != nil { + printf("RLE Transform table (%v) error: %v", tableIndex(i), err) + return err + } + seq.fse.setRLE(symb) + if debugDecoder { + printf("RLE set to %+v, code: %v", symb, v) + } + case compModeFSE: + println("Reading table for", tableIndex(i)) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) + if err != nil { + println("Read table error:", err) + return err + } + err = seq.fse.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder { + println("Read table ok", "symbolLen:", seq.fse.symbolLen) + } + case compModeRepeat: + seq.repeat = true + } + if br.overread() { + return io.ErrUnexpectedEOF + } + } + in = br.unread() + } + if debugDecoder { + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") + } + + if nSeqs == 0 { + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] + } + return nil + } + br := seqs.br + if br == nil { + br = &bitReader{} + } + if err := br.init(in); err != nil { + return err + 
} + + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + // Extract blocks... + if false && hist.dict == nil { + fatalErr := func(err error) { + if err != nil { + panic(err) + } + } + fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) + var buf bytes.Buffer + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) + fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) + buf.Write(in) + os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) + } + + return nil +} + +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.prevOffset = hist.recentOffsets + + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { + hbytes := hist.b + if len(hbytes) > hist.windowSize { + hbytes = hbytes[len(hbytes)-hist.windowSize:] + // We do not need history anymore. + if hist.dict != nil { + hist.dict.content = nil + } + } + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) + if err != nil { + return err + } + return b.updateHistory(hist) +} + +func (b *blockDec) updateHistory(hist *history) error { + if len(b.data) > maxCompressedBlockSize { + return fmt.Errorf("compressed block size too large (%d)", len(b.data)) + } + // Set output and release references. + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + + if b.Last { + // if last block we don't care about history. + println("Last block, no history returned") + hist.b = hist.b[:0] + return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } + } + hist.decoders.out, hist.decoders.literals = nil, nil + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go new file mode 100644 index 00000000..12e8f6f0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -0,0 +1,871 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" + "math/bits" + + "github.com/klauspost/compress/huff0" +) + +type blockEnc struct { + size int + literals []byte + sequences []seq + coders seqCoders + litEnc *huff0.Scratch + dictLitEnc *huff0.Scratch + wr bitWriter + + extraLits int + output []byte + recentOffsets [3]uint32 + prevRecentOffsets [3]uint32 + + last bool + lowMem bool +} + +// init should be used once the block has been created. +// If called more than once, the effect is the same as calling reset. 
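+// Buffer capacities are chosen smaller when lowMem is set.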
+func (b *blockEnc) init() { + if b.lowMem { + // 1K literals + if cap(b.literals) < 1<<10 { + b.literals = make([]byte, 0, 1<<10) + } + const defSeqs = 20 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + // 1K + if cap(b.output) < 1<<10 { + b.output = make([]byte, 0, 1<<10) + } + } else { + if cap(b.literals) < maxCompressedBlockSize { + b.literals = make([]byte, 0, maxCompressedBlockSize) + } + const defSeqs = 2000 + if cap(b.sequences) < defSeqs { + b.sequences = make([]seq, 0, defSeqs) + } + if cap(b.output) < maxCompressedBlockSize { + b.output = make([]byte, 0, maxCompressedBlockSize) + } + } + + if b.coders.mlEnc == nil { + b.coders.mlEnc = &fseEncoder{} + b.coders.mlPrev = &fseEncoder{} + b.coders.ofEnc = &fseEncoder{} + b.coders.ofPrev = &fseEncoder{} + b.coders.llEnc = &fseEncoder{} + b.coders.llPrev = &fseEncoder{} + } + b.litEnc = &huff0.Scratch{WantLogLess: 4} + b.reset(nil) +} + +// initNewEncode can be used to reset offsets and encoders to the initial state. +func (b *blockEnc) initNewEncode() { + b.recentOffsets = [3]uint32{1, 4, 8} + b.litEnc.Reuse = huff0.ReusePolicyNone + b.coders.setPrev(nil, nil, nil) +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) reset(prev *blockEnc) { + b.extraLits = 0 + b.literals = b.literals[:0] + b.size = 0 + b.sequences = b.sequences[:0] + b.output = b.output[:0] + b.last = false + if prev != nil { + b.recentOffsets = prev.prevRecentOffsets + } + b.dictLitEnc = nil +} + +// reset will reset the block for a new encode, but in the same stream, +// meaning that state will be carried over, but the block content is reset. +// If a previous block is provided, the recent offsets are carried over. +func (b *blockEnc) swapEncoders(prev *blockEnc) { + b.coders.swap(&prev.coders) + b.litEnc, prev.litEnc = prev.litEnc, b.litEnc +} + +// blockHeader contains the information for a block header. +type blockHeader uint32 + +// setLast sets the 'last' indicator on a block. +func (h *blockHeader) setLast(b bool) { + if b { + *h = *h | 1 + } else { + const mask = (1 << 24) - 2 + *h = *h & mask + } +} + +// setSize will store the compressed size of a block. +func (h *blockHeader) setSize(v uint32) { + const mask = 7 + *h = (*h)&mask | blockHeader(v<<3) +} + +// setType sets the block type. +func (h *blockHeader) setType(t blockType) { + const mask = 1 | (((1 << 24) - 1) ^ 7) + *h = (*h & mask) | blockHeader(t<<1) +} + +// appendTo will append the block header to a slice. +func (h blockHeader) appendTo(b []byte) []byte { + return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) +} + +// String returns a string representation of the block. +func (h blockHeader) String() string { + return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) +} + +// literalsHeader contains literals header information. +type literalsHeader uint64 + +// setType can be used to set the type of literal block. +func (h *literalsHeader) setType(t literalsBlockType) { + const mask = math.MaxUint64 - 3 + *h = (*h & mask) | literalsHeader(t) +} + +// setSize can be used to set a single size, for uncompressed and RLE content. 
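+// The smallest header format that can hold regenLen is selected.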
+func (h *literalsHeader) setSize(regenLen int) { + inBits := bits.Len32(uint32(regenLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case inBits < 5: + lh |= (uint64(regenLen) << 3) | (1 << 60) + if debugEncoder { + got := int(lh>>3) & 0xff + if got != regenLen { + panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) + } + } + case inBits < 12: + lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) + case inBits < 20: + lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) + default: + panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) + } + *h = literalsHeader(lh) +} + +// setSizes will set the size of a compressed literals section and the input length. +func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { + compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) + // Only retain 2 bits + const mask = 3 + lh := uint64(*h & mask) + switch { + case compBits <= 10 && inBits <= 10: + if !single { + lh |= 1 << 2 + } + lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) + if debugEncoder { + const mmask = (1 << 24) - 1 + n := (lh >> 4) & mmask + if int(n&1023) != inLen { + panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) + } + if int(n>>10) != compLen { + panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) + } + } + case compBits <= 14 && inBits <= 14: + lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + case compBits <= 18 && inBits <= 18: + lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) + if single { + panic("single stream used with more than 10 bits length.") + } + default: + panic("internal error: block too big") + } + *h = literalsHeader(lh) +} + +// appendTo will append the literals header to a byte slice. +func (h literalsHeader) appendTo(b []byte) []byte { + size := uint8(h >> 60) + switch size { + case 1: + b = append(b, uint8(h)) + case 2: + b = append(b, uint8(h), uint8(h>>8)) + case 3: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) + case 4: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) + case 5: + b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) + default: + panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) + } + return b +} + +// size returns the output size with currently set values. +func (h literalsHeader) size() int { + return int(h >> 60) +} + +func (h literalsHeader) String() string { + return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) pushOffsets() { + b.prevRecentOffsets = b.recentOffsets +} + +// pushOffsets will push the recent offsets to the backup store. +func (b *blockEnc) popOffsets() { + b.recentOffsets = b.prevRecentOffsets +} + +// matchOffset will adjust recent offsets and return the adjusted one, +// if it matches a previous offset. +func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
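+ // A zero literal length changes the repeat-offset numbering, per the zstd spec.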
+ if true { + if lits > 0 { + switch offset { + case b.recentOffsets[0]: + offset = 1 + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } else { + switch offset { + case b.recentOffsets[1]: + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 1 + case b.recentOffsets[2]: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 2 + case b.recentOffsets[0] - 1: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset = 3 + default: + b.recentOffsets[2] = b.recentOffsets[1] + b.recentOffsets[1] = b.recentOffsets[0] + b.recentOffsets[0] = offset + offset += 3 + } + } + } else { + offset += 3 + } + return offset +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRaw(a []byte) { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(a))) + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output[:0]) + b.output = append(b.output, a...) + if debugEncoder { + println("Adding RAW block, length", len(a), "last:", b.last) + } +} + +// encodeRaw can be used to set the output to a raw representation of supplied bytes. +func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(src))) + bh.setType(blockTypeRaw) + dst = bh.appendTo(dst) + dst = append(dst, src...) + if debugEncoder { + println("Adding RAW block, length", len(src), "last:", b.last) + } + return dst +} + +// encodeLits can be used if the block is only litLen. +func (b *blockEnc) encodeLits(lits []byte, raw bool) error { + var bh blockHeader + bh.setLast(b.last) + bh.setSize(uint32(len(lits))) + + // Don't compress extremely small blocks + if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + } + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(lits) >= 1024 { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(lits, b.litEnc) + } else if len(lits) > 32 { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(lits, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + if debugEncoder { + println("Adding RAW block, length", len(lits), "last:", b.last) + } + bh.setType(blockTypeRaw) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits...) + return nil + case huff0.ErrUseRLE: + if debugEncoder { + println("Adding RLE block, length", len(lits)) + } + bh.setType(blockTypeRLE) + b.output = bh.appendTo(b.output) + b.output = append(b.output, lits[0]) + return nil + case nil: + default: + return err + } + // Compressed... 
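+ // Write the literals header followed by the huff0 output.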
+ // Now, allow reuse + b.litEnc.Reuse = huff0.ReusePolicyAllow + bh.setType(blockTypeCompressed) + var lh literalsHeader + if reUsed { + if debugEncoder { + println("Reused tree, compressed to", len(out)) + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + } + // Set sizes + lh.setSizes(len(out), len(lits), single) + bh.setSize(uint32(len(out) + lh.size() + 1)) + + // Write block headers. + b.output = bh.appendTo(b.output) + b.output = lh.appendTo(b.output) + // Add compressed data. + b.output = append(b.output, out...) + // No sequences. + b.output = append(b.output, 0) + return nil +} + +// fuzzFseEncoder can be used to fuzz the FSE encoder. +func fuzzFseEncoder(data []byte) int { + if len(data) > maxSequences || len(data) < 2 { + return 0 + } + enc := fseEncoder{} + hist := enc.Histogram() + maxSym := uint8(0) + for i, v := range data { + v = v & 63 + data[i] = v + hist[v]++ + if v > maxSym { + maxSym = v + } + } + if maxSym == 0 { + // All 0 + return 0 + } + maxCount := func(a []uint32) int { + var max uint32 + for _, v := range a { + if v > max { + max = v + } + } + return int(max) + } + cnt := maxCount(hist[:maxSym]) + if cnt == len(data) { + // RLE + return 0 + } + enc.HistogramFinished(maxSym, cnt) + err := enc.normalizeCount(len(data)) + if err != nil { + return 0 + } + _, err = enc.writeCount(nil) + if err != nil { + panic(err) + } + return 1 +} + +// encode will encode the block and append the output in b.output. +// Previous offset codes must be pushed if more blocks are expected. +func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { + if len(b.sequences) == 0 { + return b.encodeLits(b.literals, rawAllLits) + } + // We want some difference to at least account for the headers. + saved := b.size - len(b.literals) - (b.size >> 5) + if saved < 16 { + if org == nil { + return errIncompressible + } + b.popOffsets() + return b.encodeLits(org, rawAllLits) + } + + var bh blockHeader + var lh literalsHeader + bh.setLast(b.last) + bh.setType(blockTypeCompressed) + // Store offset of the block header. Needed when we know the size. + bhOffset := len(b.output) + b.output = bh.appendTo(b.output) + + var ( + out []byte + reUsed, single bool + err error + ) + if b.dictLitEnc != nil { + b.litEnc.TransferCTable(b.dictLitEnc) + b.litEnc.Reuse = huff0.ReusePolicyAllow + b.dictLitEnc = nil + } + if len(b.literals) >= 1024 && !raw { + // Use 4 Streams. + out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) + } else if len(b.literals) > 32 && !raw { + // Use 1 stream + single = true + out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) + } else { + err = huff0.ErrIncompressible + } + + switch err { + case huff0.ErrIncompressible: + lh.setType(literalsBlockRaw) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals...) + if debugEncoder { + println("Adding literals RAW, length", len(b.literals)) + } + case huff0.ErrUseRLE: + lh.setType(literalsBlockRLE) + lh.setSize(len(b.literals)) + b.output = lh.appendTo(b.output) + b.output = append(b.output, b.literals[0]) + if debugEncoder { + println("Adding literals RLE") + } + case nil: + // Compressed litLen... 
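+ // A reused table is signaled as treeless; otherwise the new table precedes the data.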
+ if reUsed { + if debugEncoder { + println("reused tree") + } + lh.setType(literalsBlockTreeless) + } else { + if debugEncoder { + println("new tree, size:", len(b.litEnc.OutTable)) + } + lh.setType(literalsBlockCompressed) + if debugEncoder { + _, _, err := huff0.ReadTable(out, nil) + if err != nil { + panic(err) + } + } + } + lh.setSizes(len(out), len(b.literals), single) + if debugEncoder { + printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) + println("Adding literal header:", lh) + } + b.output = lh.appendTo(b.output) + b.output = append(b.output, out...) + b.litEnc.Reuse = huff0.ReusePolicyAllow + if debugEncoder { + println("Adding literals compressed") + } + default: + if debugEncoder { + println("Adding literals ERROR:", err) + } + return err + } + // Sequence compression + + // Write the number of sequences + switch { + case len(b.sequences) < 128: + b.output = append(b.output, uint8(len(b.sequences))) + case len(b.sequences) < 0x7f00: // TODO: this could be wrong + n := len(b.sequences) + b.output = append(b.output, 128+uint8(n>>8), uint8(n)) + default: + n := len(b.sequences) - 0x7f00 + b.output = append(b.output, 255, uint8(n), uint8(n>>8)) + } + if debugEncoder { + println("Encoding", len(b.sequences), "sequences") + } + b.genCodes() + llEnc := b.coders.llEnc + ofEnc := b.coders.ofEnc + mlEnc := b.coders.mlEnc + err = llEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = ofEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + err = mlEnc.normalizeCount(len(b.sequences)) + if err != nil { + return err + } + + // Choose the best compression mode for each type. + // Will evaluate the new vs predefined and previous. + chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { + // See if predefined/previous is better + hist := cur.count[:cur.symbolLen] + nSize := cur.approxSize(hist) + cur.maxHeaderSize() + predefSize := preDef.approxSize(hist) + prevSize := prev.approxSize(hist) + + // Add a small penalty for new encoders. + // Don't bother with extremely small (<2 byte gains). + nSize = nSize + (nSize+2*8*16)>>4 + switch { + case predefSize <= prevSize && predefSize <= nSize || forcePreDef: + if debugEncoder { + println("Using predefined", predefSize>>3, "<=", nSize>>3) + } + return preDef, compModePredefined + case prevSize <= nSize: + if debugEncoder { + println("Using previous", prevSize>>3, "<=", nSize>>3) + } + return prev, compModeRepeat + default: + if debugEncoder { + println("Using new, predef", predefSize>>3, ". 
previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") + println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) + } + return cur, compModeFSE + } + } + + // Write compression mode + var mode uint8 + if llEnc.useRLE { + mode |= uint8(compModeRLE) << 6 + llEnc.setRLE(b.sequences[0].llCode) + if debugEncoder { + println("llEnc.useRLE") + } + } else { + var m seqCompMode + llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) + mode |= uint8(m) << 6 + } + if ofEnc.useRLE { + mode |= uint8(compModeRLE) << 4 + ofEnc.setRLE(b.sequences[0].ofCode) + if debugEncoder { + println("ofEnc.useRLE") + } + } else { + var m seqCompMode + ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) + mode |= uint8(m) << 4 + } + + if mlEnc.useRLE { + mode |= uint8(compModeRLE) << 2 + mlEnc.setRLE(b.sequences[0].mlCode) + if debugEncoder { + println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) + } + } else { + var m seqCompMode + mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) + mode |= uint8(m) << 2 + } + b.output = append(b.output, mode) + if debugEncoder { + printf("Compression modes: 0b%b", mode) + } + b.output, err = llEnc.writeCount(b.output) + if err != nil { + return err + } + start := len(b.output) + b.output, err = ofEnc.writeCount(b.output) + if err != nil { + return err + } + if false { + println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) + fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) + for i, v := range ofEnc.norm[:ofEnc.symbolLen] { + fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) + } + } + b.output, err = mlEnc.writeCount(b.output) + if err != nil { + return err + } + + // Maybe in block? + wr := &b.wr + wr.reset(b.output) + + var ll, of, ml cState + + // Current sequence + seq := len(b.sequences) - 1 + s := b.sequences[seq] + llEnc.setBits(llBitsTable[:]) + mlEnc.setBits(mlBitsTable[:]) + ofEnc.setBits(nil) + + llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] + + // We have 3 bounds checks here (and in the loop). + // Since we are iterating backwards it is kinda hard to avoid. + llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] + ll.init(wr, &llEnc.ct, llB) + of.init(wr, &ofEnc.ct, ofB) + wr.flush32() + ml.init(wr, &mlEnc.ct, mlB) + + // Each of these lookups also generates a bounds check. + wr.addBits32NC(s.litLen, llB.outBits) + wr.addBits32NC(s.matchLen, mlB.outBits) + wr.flush32() + wr.addBits32NC(s.offset, ofB.outBits) + if debugSequences { + println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) + } + seq-- + // Store sequences in reverse... + for seq >= 0 { + s = b.sequences[seq] + + ofB := ofTT[s.ofCode] + wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits. + //of.encode(ofB) + nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16 + dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState) + wr.addBits16NC(of.state, uint8(nbBitsOut)) + of.state = of.stateTable[dstState] + + // Accumulate extra bits. 
+ outBits := ofB.outBits & 31
+ extraBits := uint64(s.offset & bitMask32[outBits])
+ extraBitsN := outBits
+
+ mlB := mlTT[s.mlCode]
+ //ml.encode(mlB)
+ nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
+ dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
+ wr.addBits16NC(ml.state, uint8(nbBitsOut))
+ ml.state = ml.stateTable[dstState]
+
+ outBits = mlB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ llB := llTT[s.llCode]
+ //ll.encode(llB)
+ nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
+ dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
+ wr.addBits16NC(ll.state, uint8(nbBitsOut))
+ ll.state = ll.stateTable[dstState]
+
+ outBits = llB.outBits & 31
+ extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
+ extraBitsN += outBits
+
+ wr.flush32()
+ wr.addBits64NC(extraBits, extraBitsN)
+
+ if debugSequences {
+ println("Encoded seq", seq, s)
+ }
+
+ seq--
+ }
+ ml.flush(mlEnc.actualTableLog)
+ of.flush(ofEnc.actualTableLog)
+ ll.flush(llEnc.actualTableLog)
+ err = wr.close()
+ if err != nil {
+ return err
+ }
+ b.output = wr.out
+
+ if len(b.output)-3-bhOffset >= b.size {
+ // Maybe even add a bigger margin.
+ b.litEnc.Reuse = huff0.ReusePolicyNone
+ return errIncompressible
+ }
+
+ // Size is output minus block header.
+ bh.setSize(uint32(len(b.output)-bhOffset) - 3)
+ if debugEncoder {
+ println("Rewriting block header", bh)
+ }
+ _ = bh.appendTo(b.output[bhOffset:bhOffset])
+ b.coders.setPrev(llEnc, mlEnc, ofEnc)
+ return nil
+}
+
+var errIncompressible = errors.New("incompressible")
+
+func (b *blockEnc) genCodes() {
+ if len(b.sequences) == 0 {
+ // nothing to do
+ return
+ }
+ if len(b.sequences) > math.MaxUint16 {
+ panic("can only encode up to 64K sequences")
+ }
+ // No bounds checks after here:
+ llH := b.coders.llEnc.Histogram()
+ ofH := b.coders.ofEnc.Histogram()
+ mlH := b.coders.mlEnc.Histogram()
+ for i := range llH {
+ llH[i] = 0
+ }
+ for i := range ofH {
+ ofH[i] = 0
+ }
+ for i := range mlH {
+ mlH[i] = 0
+ }
+
+ var llMax, ofMax, mlMax uint8
+ for i := range b.sequences {
+ seq := &b.sequences[i]
+ v := llCode(seq.litLen)
+ seq.llCode = v
+ llH[v]++
+ if v > llMax {
+ llMax = v
+ }
+
+ v = ofCode(seq.offset)
+ seq.ofCode = v
+ ofH[v]++
+ if v > ofMax {
+ ofMax = v
+ }
+
+ v = mlCode(seq.matchLen)
+ seq.mlCode = v
+ mlH[v]++
+ if v > mlMax {
+ mlMax = v
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
+ }
+ }
+ }
+ maxCount := func(a []uint32) int {
+ var max uint32
+ for _, v := range a {
+ if v > max {
+ max = v
+ }
+ }
+ return int(max)
+ }
+ if debugAsserts && mlMax > maxMatchLengthSymbol {
+ panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
+ }
+ if debugAsserts && ofMax > maxOffsetBits {
+ panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
+ }
+ if debugAsserts && llMax > maxLiteralLengthSymbol {
+ panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
+ }
+
+ b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
+ b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
+ b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
new file mode 100644
index 00000000..01a01e48
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
@@ -0,0 +1,85 @@
+// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
+
+package zstd
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{} + _ = x[blockTypeRaw-0] + _ = x[blockTypeRLE-1] + _ = x[blockTypeCompressed-2] + _ = x[blockTypeReserved-3] +} + +const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved" + +var _blockType_index = [...]uint8{0, 12, 24, 43, 60} + +func (i blockType) String() string { + if i >= blockType(len(_blockType_index)-1) { + return "blockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _blockType_name[_blockType_index[i]:_blockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[literalsBlockRaw-0] + _ = x[literalsBlockRLE-1] + _ = x[literalsBlockCompressed-2] + _ = x[literalsBlockTreeless-3] +} + +const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless" + +var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76} + +func (i literalsBlockType) String() string { + if i >= literalsBlockType(len(_literalsBlockType_index)-1) { + return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[compModePredefined-0] + _ = x[compModeRLE-1] + _ = x[compModeFSE-2] + _ = x[compModeRepeat-3] +} + +const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat" + +var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54} + +func (i seqCompMode) String() string { + if i >= seqCompMode(len(_seqCompMode_index)-1) { + return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]] +} +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[tableLiteralLengths-0] + _ = x[tableOffsets-1] + _ = x[tableMatchLengths-2] +} + +const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths" + +var _tableIndex_index = [...]uint8{0, 19, 31, 48} + +func (i tableIndex) String() string { + if i >= tableIndex(len(_tableIndex_index)-1) { + return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]] +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go new file mode 100644 index 00000000..176788f2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -0,0 +1,131 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "io" +) + +type byteBuffer interface { + // Read up to 8 bytes. + // Returns io.ErrUnexpectedEOF if this cannot be satisfied. + readSmall(n int) ([]byte, error) + + // Read >8 bytes. + // MAY use the destination slice. + readBig(n int, dst []byte) ([]byte, error) + + // Read a single byte. + readByte() (byte, error) + + // Skip n bytes. 
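+ // Returns io.ErrUnexpectedEOF if the stream runs short.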
+ skipN(n int64) error +} + +// in-memory buffer +type byteBuf []byte + +func (b *byteBuf) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { + bb := *b + if len(bb) < n { + return nil, io.ErrUnexpectedEOF + } + r := bb[:n] + *b = bb[n:] + return r, nil +} + +func (b *byteBuf) readByte() (byte, error) { + bb := *b + if len(bb) < 1 { + return 0, nil + } + r := bb[0] + *b = bb[1:] + return r, nil +} + +func (b *byteBuf) skipN(n int64) error { + bb := *b + if n < 0 { + return fmt.Errorf("negative skip (%d) requested", n) + } + if int64(len(bb)) < n { + return io.ErrUnexpectedEOF + } + *b = bb[n:] + return nil +} + +// wrapper around a reader. +type readerWrapper struct { + r io.Reader + tmp [8]byte +} + +func (r *readerWrapper) readSmall(n int) ([]byte, error) { + if debugAsserts && n > 8 { + panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) + } + n2, err := io.ReadFull(r.r, r.tmp[:n]) + // We only really care about the actual bytes read. + if err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + if debugDecoder { + println("readSmall: got", n2, "want", n, "err", err) + } + return nil, err + } + return r.tmp[:n], nil +} + +func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { + if cap(dst) < n { + dst = make([]byte, n) + } + n2, err := io.ReadFull(r.r, dst[:n]) + if err == io.EOF && n > 0 { + err = io.ErrUnexpectedEOF + } + return dst[:n2], err +} + +func (r *readerWrapper) readByte() (byte, error) { + n2, err := r.r.Read(r.tmp[:1]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, err + } + if n2 != 1 { + return 0, io.ErrUnexpectedEOF + } + return r.tmp[0], nil +} + +func (r *readerWrapper) skipN(n int64) error { + n2, err := io.CopyN(io.Discard, r.r, n) + if n2 != n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go new file mode 100644 index 00000000..0e59a242 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go @@ -0,0 +1,82 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// byteReader provides a byte reader that reads +// little endian values from a byte stream. +// The input stream is manually advanced. +// The reader performs no bounds checks. +type byteReader struct { + b []byte + off int +} + +// advance the stream b n bytes. +func (b *byteReader) advance(n uint) { + b.off += int(n) +} + +// overread returns whether we have advanced too far. +func (b *byteReader) overread() bool { + return b.off > len(b.b) +} + +// Int32 returns a little endian int32 starting at current offset. +func (b byteReader) Int32() int32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := int32(b2[3]) + v2 := int32(b2[2]) + v1 := int32(b2[1]) + v0 := int32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint8 returns the next byte +func (b *byteReader) Uint8() uint8 { + v := b.b[b.off] + return v +} + +// Uint32 returns a little endian uint32 starting at current offset. 
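+// If fewer than 4 bytes remain, the missing high bytes read as zero.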
+func (b byteReader) Uint32() uint32 { + if r := b.remain(); r < 4 { + // Very rare + v := uint32(0) + for i := 1; i <= r; i++ { + v = (v << 8) | uint32(b.b[len(b.b)-i]) + } + return v + } + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// Uint32NC returns a little endian uint32 starting at current offset. +// The caller must be sure if there are at least 4 bytes left. +func (b byteReader) Uint32NC() uint32 { + b2 := b.b[b.off:] + b2 = b2[:4] + v3 := uint32(b2[3]) + v2 := uint32(b2[2]) + v1 := uint32(b2[1]) + v0 := uint32(b2[0]) + return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) +} + +// unread returns the unread portion of the input. +func (b byteReader) unread() []byte { + return b.b[b.off:] +} + +// remain will return the number of bytes remaining. +func (b byteReader) remain() int { + return len(b.b) - b.off +} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go new file mode 100644 index 00000000..5022e71c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -0,0 +1,230 @@ +// Copyright 2020+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "io" +) + +// HeaderMaxSize is the maximum size of a Frame and Block Header. +// If less is sent to Header.Decode it *may* still contain enough information. +const HeaderMaxSize = 14 + 3 + +// Header contains information about the first frame and block within that. +type Header struct { + // SingleSegment specifies whether the data is to be decompressed into a + // single contiguous memory segment. + // It implies that WindowSize is invalid and that FrameContentSize is valid. + SingleSegment bool + + // WindowSize is the window of data to keep while decoding. + // Will only be set if SingleSegment is false. + WindowSize uint64 + + // Dictionary ID. + // If 0, no dictionary. + DictionaryID uint32 + + // HasFCS specifies whether FrameContentSize has a valid value. + HasFCS bool + + // FrameContentSize is the expected uncompressed size of the entire frame. + FrameContentSize uint64 + + // Skippable will be true if the frame is meant to be skipped. + // This implies that FirstBlock.OK is false. + Skippable bool + + // SkippableID is the user-specific ID for the skippable frame. + // Valid values are between 0 to 15, inclusive. + SkippableID int + + // SkippableSize is the length of the user data to skip following + // the header. + SkippableSize uint32 + + // HeaderSize is the raw size of the frame header. + // + // For normal frames, it includes the size of the magic number and + // the size of the header (per section 3.1.1.1). + // It does not include the size for any data blocks (section 3.1.1.2) nor + // the size for the trailing content checksum. + // + // For skippable frames, this counts the size of the magic number + // along with the size of the size field of the payload. + // It does not include the size of the skippable payload itself. + // The total frame size is the HeaderSize plus the SkippableSize. + HeaderSize int + + // First block information. + FirstBlock struct { + // OK will be set if first block could be decoded. + OK bool + + // Is this the last block of a frame? + Last bool + + // Is the data compressed? + // If true CompressedSize will be populated. 
+ // Unfortunately DecompressedSize cannot be determined + // without decoding the blocks. + Compressed bool + + // DecompressedSize is the expected decompressed size of the block. + // Will be 0 if it cannot be determined. + DecompressedSize int + + // CompressedSize of the data in the block. + // Does not include the block header. + // Will be equal to DecompressedSize if not Compressed. + CompressedSize int + } + + // If set there is a checksum present for the block content. + // The checksum field at the end is always 4 bytes long. + HasCheckSum bool +} + +// Decode the header from the beginning of the stream. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) Decode(in []byte) error { + *h = Header{} + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + b, in := in[:4], in[4:] + if !bytes.Equal(b, frameMagic) { + if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + return ErrMagicMismatch + } + if len(in) < 4 { + return io.ErrUnexpectedEOF + } + h.HeaderSize += 4 + h.Skippable = true + h.SkippableID = int(b[0] & 0xf) + h.SkippableSize = binary.LittleEndian.Uint32(in) + return nil + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + fhd, in := in[0], in[1:] + h.HeaderSize++ + h.SingleSegment = fhd&(1<<5) != 0 + h.HasCheckSum = fhd&(1<<2) != 0 + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + if !h.SingleSegment { + if len(in) < 1 { + return io.ErrUnexpectedEOF + } + var wd byte + wd, in = in[0], in[1:] + h.HeaderSize++ + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + h.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + if len(in) < int(size) { + return io.ErrUnexpectedEOF + } + b, in = in[:size], in[size:] + h.HeaderSize += int(size) + switch size { + case 1: + h.DictionaryID = uint32(b[0]) + case 2: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if h.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + + if fcsSize > 0 { + h.HasFCS = true + if len(in) < fcsSize { + return io.ErrUnexpectedEOF + } + b, in = in[:fcsSize], in[fcsSize:] + h.HeaderSize += int(fcsSize) + switch fcsSize { + case 1: + h.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. 
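+ // The 2-byte field therefore represents sizes 256 through 65791.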
+ h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + } + + // Frame Header done, we will not fail from now on. + if len(in) < 3 { + return nil + } + tmp := in[:3] + bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) + h.FirstBlock.Last = bh&1 != 0 + blockType := blockType((bh >> 1) & 3) + // find size. + cSize := int(bh >> 3) + switch blockType { + case blockTypeReserved: + return nil + case blockTypeRLE: + h.FirstBlock.Compressed = true + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = 1 + case blockTypeCompressed: + h.FirstBlock.Compressed = true + h.FirstBlock.CompressedSize = cSize + case blockTypeRaw: + h.FirstBlock.DecompressedSize = cSize + h.FirstBlock.CompressedSize = cSize + default: + panic("Invalid block type") + } + + h.FirstBlock.OK = true + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go new file mode 100644 index 00000000..78c10755 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -0,0 +1,950 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "context" + "encoding/binary" + "io" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Decoder provides decoding of zstandard streams. +// The decoder has been designed to operate without allocations after a warmup. +// This means that you should store the decoder for best performance. +// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. +// A decoder can safely be re-used even if the previous stream failed. +// To release the resources, you must call the Close() function on a decoder. +type Decoder struct { + o decoderOptions + + // Unreferenced decoders, ready for use. + decoders chan *blockDec + + // Current read position used for Reader functionality. + current decoderState + + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + dstBuf []byte + } + + frame *frameDec + + // Custom dictionaries. + // Always uses copies. + dicts map[uint32]dict + + // streamWg is the waitgroup for all streams + streamWg sync.WaitGroup +} + +// decoderState is used for maintaining state when the decoder +// is used for streaming. +type decoderState struct { + // current block being written to stream. + decodeOutput + + // output in order to be written to stream. + output chan decodeOutput + + // cancel remaining output. + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest + + flushed bool +} + +var ( + // Check the interfaces we want to support. + _ = io.WriterTo(&Decoder{}) + _ = io.Reader(&Decoder{}) +) + +// NewReader creates a new decoder. +// A nil Reader can be provided in which case Reset can be used to start a decode. +// +// A Decoder can be used in two modes: +// +// 1) As a stream, or +// 2) For stateless decoding using DecodeAll. 
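+//
+// An illustrative sketch of both modes; caller-side code where rd, dst and
+// blob are placeholders, and error handling is elided:
+//
+//	dec, _ := zstd.NewReader(rd)       // 1) stream mode: dec is an io.Reader
+//	defer dec.Close()
+//	_, _ = io.Copy(dst, dec)           // drain the stream
+//	out, _ := dec.DecodeAll(blob, nil) // 2) stateless mode; safe for concurrent use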
+//
+// Only a single stream can be decoded concurrently, but the same decoder
+// can run multiple concurrent stateless decodes. It is even possible to
+// use stateless decodes while a stream is being decoded.
+//
+// The Reset function can be used to initiate a new stream, which will considerably
+// reduce the allocations normally caused by NewReader.
+func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
+	initPredefined()
+	var d Decoder
+	d.o.setDefault()
+	for _, o := range opts {
+		err := o(&d.o)
+		if err != nil {
+			return nil, err
+		}
+	}
+	d.current.crc = xxhash.New()
+	d.current.flushed = true
+
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+	}
+
+	// Transfer option dicts.
+	d.dicts = make(map[uint32]dict, len(d.o.dicts))
+	for _, dc := range d.o.dicts {
+		d.dicts[dc.id] = dc
+	}
+	d.o.dicts = nil
+
+	// Create decoders
+	d.decoders = make(chan *blockDec, d.o.concurrent)
+	for i := 0; i < d.o.concurrent; i++ {
+		dec := newBlockDec(d.o.lowMem)
+		dec.localFrame = newFrameDec(d.o)
+		d.decoders <- dec
+	}
+
+	if r == nil {
+		return &d, nil
+	}
+	return &d, d.Reset(r)
+}
+
+// Read bytes from the decompressed stream into p.
+// Returns the number of bytes written to p and any error that occurred.
+// When the stream is done, io.EOF will be returned.
+func (d *Decoder) Read(p []byte) (int, error) {
+	var n int
+	for {
+		if len(d.current.b) > 0 {
+			filled := copy(p, d.current.b)
+			p = p[filled:]
+			d.current.b = d.current.b[filled:]
+			n += filled
+		}
+		if len(p) == 0 {
+			break
+		}
+		if len(d.current.b) == 0 {
+			// We have an error and no more data
+			if d.current.err != nil {
+				break
+			}
+			if !d.nextBlock(n == 0) {
+				return n, d.current.err
+			}
+		}
+	}
+	if len(d.current.b) > 0 {
+		if debugDecoder {
+			println("returning", n, "still bytes left:", len(d.current.b))
+		}
+		// Only return error at end of block
+		return n, nil
+	}
+	if d.current.err != nil {
+		d.drainOutput()
+	}
+	if debugDecoder {
+		println("returning", n, d.current.err, len(d.decoders))
+	}
+	return n, d.current.err
+}
+
+// Reset will reset the decoder to the supplied stream after the current one has finished processing.
+// Note that this functionality cannot be used after Close has been called.
+// Reset can be called with a nil reader to release references to the previous reader.
+// After being called with a nil reader, no operations other than Reset, DecodeAll or Close
+// should be used.
+func (d *Decoder) Reset(r io.Reader) error {
+	if d.current.err == ErrDecoderClosed {
+		return d.current.err
+	}
+
+	d.drainOutput()
+
+	d.syncStream.br.r = nil
+	if r == nil {
+		d.current.err = ErrDecoderNilInput
+		if len(d.current.b) > 0 {
+			d.current.b = d.current.b[:0]
+		}
+		d.current.flushed = true
+		return nil
+	}
+
+	// If the input is a bytes.Buffer-like reader below the decodeBufsBelow threshold, do sync decoding anyway.
+	if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
+		bb2 := bb
+		if debugDecoder {
+			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
+		}
+		b := bb2.Bytes()
+		var dst []byte
+		if cap(d.syncStream.dstBuf) > 0 {
+			dst = d.syncStream.dstBuf[:0]
+		}
+
+		dst, err := d.DecodeAll(b, dst)
+		if err == nil {
+			err = io.EOF
+		}
+		// Save output buffer
+		d.syncStream.dstBuf = dst
+		d.current.b = dst
+		d.current.err = err
+		d.current.flushed = true
+		if debugDecoder {
+			println("sync decode to", len(dst), "bytes, err:", err)
+		}
+		return nil
+	}
+	// Remove current block.
+ d.stashDecoder() + d.current.decodeOutput = decodeOutput{} + d.current.err = nil + d.current.flushed = false + d.current.d = nil + d.syncStream.dstBuf = nil + + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) + } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + + return nil +} + +// drainOutput will drain the output until errEndOfStream is sent. +func (d *Decoder) drainOutput() { + if d.current.cancel != nil { + if debugDecoder { + println("cancelling current") + } + d.current.cancel() + d.current.cancel = nil + } + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) + } + d.decoders <- d.current.d + d.current.d = nil + d.current.b = nil + } + if d.current.output == nil || d.current.flushed { + println("current already flushed") + return + } + for v := range d.current.output { + if v.d != nil { + if debugDecoder { + printf("re-adding decoder %p", v.d) + } + d.decoders <- v.d + } + } + d.current.output = nil + d.current.flushed = true +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs. +// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (d *Decoder) WriteTo(w io.Writer) (int64, error) { + var n int64 + for { + if len(d.current.b) > 0 { + n2, err2 := w.Write(d.current.b) + n += int64(n2) + if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { + d.current.err = err2 + } else if n2 != len(d.current.b) { + d.current.err = io.ErrShortWrite + } + } + if d.current.err != nil { + break + } + d.nextBlock(true) + } + err := d.current.err + if err != nil { + d.drainOutput() + } + if err == io.EOF { + err = nil + } + return n, err +} + +// DecodeAll allows stateless decoding of a blob of bytes. +// Output will be appended to dst, so if the destination size is known +// you can pre-allocate the destination slice to avoid allocations. +// DecodeAll can be used concurrently. +// The Decoder concurrency limits will be respected. +func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { + if d.decoders == nil { + return dst, ErrDecoderClosed + } + + // Grab a block decoder and frame decoder. 
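+	// Each pooled blockDec carries its own frameDec (localFrame), so
+	// concurrent DecodeAll calls do not contend on the streaming frame state.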
+ block := <-d.decoders + frame := block.localFrame + initialSize := len(dst) + defer func() { + if debugDecoder { + printf("re-adding decoder: %p", block) + } + frame.rawInput = nil + frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } + d.decoders <- block + }() + frame.bBuf = input + + for { + frame.history.reset() + err := frame.reset(&frame.bBuf) + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil + } + return dst, err + } + if frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + return nil, ErrUnknownDictionary + } + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(&dict) + } + if frame.WindowSize > d.o.maxWindowSize { + if debugDecoder { + println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) + } + return dst, ErrWindowSizeExceeded + } + if frame.FrameContentSize != fcsUnknown { + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { + if debugDecoder { + println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) + } + return dst, ErrDecoderSizeExceeded + } + if cap(dst)-len(dst) < int(frame.FrameContentSize) { + dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + + if cap(dst) == 0 && !d.o.limitToCap { + // Allocate len(input) * 2 by default if nothing is provided + // and we didn't get frame content size. + size := len(input) * 2 + // Cap to 1 MB. + if size > 1<<20 { + size = 1 << 20 + } + if uint64(size) > d.o.maxDecodedSize { + size = int(d.o.maxDecodedSize) + } + dst = make([]byte, 0, size) + } + + dst, err = frame.runDecoder(dst, block) + if err != nil { + return dst, err + } + if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { + return dst, ErrDecoderSizeExceeded + } + if len(frame.bBuf) == 0 { + if debugDecoder { + println("frame dbuf empty") + } + break + } + } + return dst, nil +} + +// nextBlock returns the next block. +// If an error occurs d.err will be set. +// Optionally the function can block for new output. +// If non-blocking mode is used the returned boolean will be false +// if no data was available without blocking. +func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. + return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... 
+ d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if !d.o.ignoreChecksum && len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { + got := d.current.crc.Sum64() + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(got)) + if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { + if debugDecoder { + println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + println("CRC ok", tmp[:]) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err != nil { + return false + } + if d.frame.DictionaryID != nil { + dict, ok := d.dicts[*d.frame.DictionaryID] + if !ok { + d.current.err = ErrUnknownDictionary + return false + } else { + d.frame.history.setDict(&dict) + } + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + if !d.o.ignoreChecksum { + d.frame.crc.Write(d.current.b) + } + if d.current.d.Last { + if !d.o.ignoreChecksum { + d.current.err = d.frame.checkCRC() + } else { + d.current.err = d.frame.consumeCRC() + } + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { + if d.current.d != nil { + if debugDecoder { + printf("re-adding current decoder %p", d.current.d) + } + d.decoders <- d.current.d + d.current.d = 
nil + } +} + +// Close will release all resources. +// It is NOT possible to reuse the decoder after this. +func (d *Decoder) Close() { + if d.current.err == ErrDecoderClosed { + return + } + d.drainOutput() + if d.current.cancel != nil { + d.current.cancel() + d.streamWg.Wait() + d.current.cancel = nil + } + if d.decoders != nil { + close(d.decoders) + for dec := range d.decoders { + dec.Close() + } + d.decoders = nil + } + if d.current.d != nil { + d.current.d.Close() + d.current.d = nil + } + d.current.err = ErrDecoderClosed +} + +// IOReadCloser returns the decoder as an io.ReadCloser for convenience. +// Any changes to the decoder will be reflected, so the returned ReadCloser +// can be reused along with the decoder. +// io.WriterTo is also supported by the returned ReadCloser. +func (d *Decoder) IOReadCloser() io.ReadCloser { + return closeWrapper{d: d} +} + +// closeWrapper wraps a function call as a closer. +type closeWrapper struct { + d *Decoder +} + +// WriteTo forwards WriteTo calls to the decoder. +func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { + return c.d.WriteTo(w) +} + +// Read forwards read calls to the decoder. +func (c closeWrapper) Read(p []byte) (n int, err error) { + return c.d.Read(p) +} + +// Close closes the decoder. +func (c closeWrapper) Close() error { + c.d.Close() + return nil +} + +type decodeOutput struct { + d *blockDec + b []byte + err error +} + +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil +} + +// Create Decoder: +// ASYNC: +// Spawn 3 go routines. +// 0: Read frames and decode block literals. +// 1: Decode sequences. +// 2: Execute sequences, send to output. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { + defer d.streamWg.Done() + br := readerWrapper{r: r} + + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history, recent:", block.async.newHist.recentOffsets) + } + hist.reset() + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + hist.windowSize = block.async.newHist.windowSize + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + hist.reset() + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history") + } + hist.reset() + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true + } else { + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } + } + } + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + hist.reset() + }() + + var hist history +decodeStream: + for { + var hasErr bool + hist.reset() + decodeBlock := func(block *blockDec) { + if hasErr { + if block != nil { + seqDecode <- block + } + return + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + return + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block + } + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err == nil && d.frame.WindowSize > 
d.o.maxWindowSize { + if debugDecoder { + println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize) + } + + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + decodeBlock(dec) + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. + } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) + } + hist.reset() + if h.dict != nil { + hist.setDict(h.dict) + } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.checkCRC = nil + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + dec.err = err + } + var tmp [4]byte + copy(tmp[:], crc) + dec.checkCRC = tmp[:] + if debugDecoder { + println("found crc to check:", dec.checkCRC) + } + } + err = dec.err + last := dec.Last + decodeBlock(dec) + if err != nil { + break decodeStream + } + if last { + break + } + } + } + close(seqDecode) + wg.Wait() + hist.reset() + d.frame.history.b = frameHistCache +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go new file mode 100644 index 00000000..f42448e6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -0,0 +1,149 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "runtime" +) + +// DOption is an option for creating a decoder. +type DOption func(*decoderOptions) error + +// options retains accumulated state of multiple options. +type decoderOptions struct { + lowMem bool + concurrent int + maxDecodedSize uint64 + maxWindowSize uint64 + dicts []dict + ignoreChecksum bool + limitToCap bool + decodeBufsBelow int +} + +func (o *decoderOptions) setDefault() { + *o = decoderOptions{ + // use less ram: true for now, but may change. + lowMem: true, + concurrent: runtime.GOMAXPROCS(0), + maxWindowSize: MaxWindowSize, + decodeBufsBelow: 128 << 10, + } + if o.concurrent > 4 { + o.concurrent = 4 + } + o.maxDecodedSize = 64 << 30 +} + +// WithDecoderLowmem will set whether to use a lower amount of memory, +// but possibly have to allocate more while running. +func WithDecoderLowmem(b bool) DOption { + return func(o *decoderOptions) error { o.lowMem = b; return nil } +} + +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. 
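+//
+// An illustrative caller-side sketch:
+//
+//	dec, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(1)) // fully synchronous decoding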
+func WithDecoderConcurrency(n int) DOption {
+	return func(o *decoderOptions) error {
+		if n < 0 {
+			return errors.New("concurrency must be at least 1")
+		}
+		if n == 0 {
+			o.concurrent = runtime.GOMAXPROCS(0)
+		} else {
+			o.concurrent = n
+		}
+		return nil
+	}
+}
+
+// WithDecoderMaxMemory allows setting a maximum decoded size for in-memory
+// non-streaming operations or a maximum window size for streaming operations.
+// This can be used to control memory usage of potentially hostile content.
+// Maximum is 1 << 63 bytes. Default is 64GiB.
+func WithDecoderMaxMemory(n uint64) DOption {
+	return func(o *decoderOptions) error {
+		if n == 0 {
+			return errors.New("WithDecoderMaxMemory must be at least 1")
+		}
+		if n > 1<<63 {
+			return errors.New("WithDecoderMaxmemory must be less than 1 << 63")
+		}
+		o.maxDecodedSize = n
+		return nil
+	}
+}
+
+// WithDecoderDicts allows registering one or more dictionaries for the decoder.
+// If several dictionaries with the same ID are provided, the last one will be used.
+func WithDecoderDicts(dicts ...[]byte) DOption {
+	return func(o *decoderOptions) error {
+		for _, b := range dicts {
+			d, err := loadDict(b)
+			if err != nil {
+				return err
+			}
+			o.dicts = append(o.dicts, *d)
+		}
+		return nil
+	}
+}
+
+// WithDecoderMaxWindow allows setting a maximum window size for decodes.
+// This allows rejecting frames that would cause excessive memory usage.
+// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
+// If WithDecoderMaxMemory is set to a lower value, that will be used.
+// Default is 512MB. Maximum is ~3.75 TB, as per the zstandard spec.
+func WithDecoderMaxWindow(size uint64) DOption {
+	return func(o *decoderOptions) error {
+		if size < MinWindowSize {
+			return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes")
+		}
+		if size > (1<<41)+7*(1<<38) {
+			return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB")
+		}
+		o.maxWindowSize = size
+		return nil
+	}
+}
+
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+	return func(o *decoderOptions) error {
+		o.limitToCap = b
+		return nil
+	}
+}
+
+// WithDecodeBuffersBelow will fully decode readers that have a
+// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
+// This typically uses fewer allocations but will have the full decompressed object in memory.
+// Note that enabling WithDecodeAllCapLimit disables this, as does giving a size of 0 or less.
+// Default is 128KiB.
+func WithDecodeBuffersBelow(size int) DOption {
+	return func(o *decoderOptions) error {
+		o.decodeBufsBelow = size
+		return nil
+	}
+}
+
+// IgnoreChecksum allows checksum verification to be forcibly skipped.
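+// Note that corrupt frames may then decode without returning an error.
+// An illustrative caller-side sketch:
+//
+//	dec, err := zstd.NewReader(r, zstd.IgnoreChecksum(true)) // skip content checksum verification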
+func IgnoreChecksum(b bool) DOption { + return func(o *decoderOptions) error { + o.ignoreChecksum = b + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go new file mode 100644 index 00000000..a36ae83e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -0,0 +1,122 @@ +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + + "github.com/klauspost/compress/huff0" +) + +type dict struct { + id uint32 + + litEnc *huff0.Scratch + llDec, ofDec, mlDec sequenceDec + //llEnc, ofEnc, mlEnc []*fseEncoder + offsets [3]int + content []byte +} + +var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} + +// ID returns the dictionary id or 0 if d is nil. +func (d *dict) ID() uint32 { + if d == nil { + return 0 + } + return d.id +} + +// DictContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) DictContentSize() int { + if d == nil { + return 0 + } + return len(d.content) +} + +// Load a dictionary as described in +// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format +func loadDict(b []byte) (*dict, error) { + // Check static field size. + if len(b) <= 8+(3*4) { + return nil, io.ErrUnexpectedEOF + } + d := dict{ + llDec: sequenceDec{fse: &fseDecoder{}}, + ofDec: sequenceDec{fse: &fseDecoder{}}, + mlDec: sequenceDec{fse: &fseDecoder{}}, + } + if !bytes.Equal(b[:4], dictMagic[:]) { + return nil, ErrMagicMismatch + } + d.id = binary.LittleEndian.Uint32(b[4:8]) + if d.id == 0 { + return nil, errors.New("dictionaries cannot have ID 0") + } + + // Read literal table + var err error + d.litEnc, b, err = huff0.ReadTable(b[8:], nil) + if err != nil { + return nil, err + } + d.litEnc.Reuse = huff0.ReusePolicyMust + + br := byteReader{ + b: b, + off: 0, + } + readDec := func(i tableIndex, dec *fseDecoder) error { + if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { + return err + } + if br.overread() { + return io.ErrUnexpectedEOF + } + err = dec.transform(symbolTableX[i]) + if err != nil { + println("Transform table error:", err) + return err + } + if debugDecoder || debugEncoder { + println("Read table ok", "symbolLen:", dec.symbolLen) + } + // Set decoders as predefined so they aren't reused. 
+ dec.preDefined = true + return nil + } + + if err := readDec(tableOffsets, d.ofDec.fse); err != nil { + return nil, err + } + if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { + return nil, err + } + if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { + return nil, err + } + if br.remain() < 12 { + return nil, io.ErrUnexpectedEOF + } + + d.offsets[0] = int(br.Uint32()) + br.advance(4) + d.offsets[1] = int(br.Uint32()) + br.advance(4) + d.offsets[2] = int(br.Uint32()) + br.advance(4) + if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { + return nil, errors.New("invalid offset in dictionary") + } + d.content = make([]byte, br.remain()) + copy(d.content, br.unread()) + if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { + return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) + } + + return &d, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go new file mode 100644 index 00000000..15ae8ee8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -0,0 +1,188 @@ +package zstd + +import ( + "fmt" + "math/bits" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +const ( + dictShardBits = 6 +) + +type fastBase struct { + // cur is the offset at the start of hist + cur int32 + // maximum offset. Should be at least 2x block size. + maxMatchOff int32 + hist []byte + crc *xxhash.Digest + tmp [8]byte + blk *blockEnc + lastDictID uint32 + lowMem bool +} + +// CRC returns the underlying CRC writer. +func (e *fastBase) CRC() *xxhash.Digest { + return e.crc +} + +// AppendCRC will append the CRC to the destination slice and return it. +func (e *fastBase) AppendCRC(dst []byte) []byte { + crc := e.crc.Sum(e.tmp[:0]) + dst = append(dst, crc[7], crc[6], crc[5], crc[4]) + return dst +} + +// WindowSize returns the window size of the encoder, +// or a window size small enough to contain the input size, if > 0. +func (e *fastBase) WindowSize(size int64) int32 { + if size > 0 && size < int64(e.maxMatchOff) { + b := int32(1) << uint(bits.Len(uint(size))) + // Keep minimum window. + if b < 1024 { + b = 1024 + } + return b + } + return e.maxMatchOff +} + +// Block returns the current block. +func (e *fastBase) Block() *blockEnc { + return e.blk +} + +func (e *fastBase) addBlock(src []byte) int32 { + if debugAsserts && e.cur > bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) + } + // check if we have space already + if len(e.hist)+len(src) > cap(e.hist) { + if cap(e.hist) == 0 { + e.ensureHist(len(src)) + } else { + if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { + panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) + } + // Move down + offset := int32(len(e.hist)) - e.maxMatchOff + copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) + e.cur += offset + e.hist = e.hist[:e.maxMatchOff] + } + } + s := int32(len(e.hist)) + e.hist = append(e.hist, src...) + return s +} + +// ensureHist will ensure that history can keep at least this many bytes. 
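+// Roughly: room for the match window plus one compressed block (or two
+// windows), at least 1MB unless lowMem is set, and never less than n.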
+func (e *fastBase) ensureHist(n int) { + if cap(e.hist) >= n { + return + } + l := e.maxMatchOff + if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { + l += maxCompressedBlockSize + } else { + l += e.maxMatchOff + } + // Make it at least 1MB. + if l < 1<<20 && !e.lowMem { + l = 1 << 20 + } + // Make it at least the requested size. + if l < int32(n) { + l = int32(n) + } + e.hist = make([]byte, 0, l) +} + +// useBlock will replace the block with the provided one, +// but transfer recent offsets from the previous. +func (e *fastBase) UseBlock(enc *blockEnc) { + enc.reset(e.blk) + e.blk = enc +} + +func (e *fastBase) matchlen(s, t int32, src []byte) int32 { + if debugAsserts { + if s < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if t < 0 { + err := fmt.Sprintf("s (%d) < 0", s) + panic(err) + } + if s-t > e.maxMatchOff { + err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) + panic(err) + } + if len(src)-int(s) > maxCompressedBlockSize { + panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) + } + } + a := src[s:] + b := src[t:] + b = b[:len(a)] + end := int32((len(a) >> 3) << 3) + for i := int32(0); i < end; i += 8 { + if diff := load6432(a, i) ^ load6432(b, i); diff != 0 { + return i + int32(bits.TrailingZeros64(diff)>>3) + } + } + + a = a[end:] + b = b[end:] + for i := range a { + if a[i] != b[i] { + return int32(i) + end + } + } + return int32(len(a)) + end +} + +// Reset the encoding table. +func (e *fastBase) resetBase(d *dict, singleBlock bool) { + if e.blk == nil { + e.blk = &blockEnc{lowMem: e.lowMem} + e.blk.init() + } else { + e.blk.reset(nil) + } + e.blk.initNewEncode() + if e.crc == nil { + e.crc = xxhash.New() + } else { + e.crc.Reset() + } + if d != nil { + low := e.lowMem + if singleBlock { + e.lowMem = true + } + e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.lowMem = low + } + + // We offset current position so everything will be out of reach. + // If above reset line, history will be purged. + if e.cur < bufferReset { + e.cur += e.maxMatchOff + int32(len(e.hist)) + } + e.hist = e.hist[:0] + if d != nil { + // Set offsets (currently not used) + for i, off := range d.offsets { + e.blk.recentOffsets[i] = uint32(off) + e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] + } + // Transfer litenc. + e.blk.dictLitEnc = d.litEnc + e.hist = append(e.hist, d.content...) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go new file mode 100644 index 00000000..dbbb88d9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -0,0 +1,559 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "bytes" + "fmt" + + "github.com/klauspost/compress" +) + +const ( + bestLongTableBits = 22 // Bits used in the long match table + bestLongTableSize = 1 << bestLongTableBits // Size of the table + bestLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
+ bestShortTableBits = 18 // Bits used in the short match table + bestShortTableSize = 1 << bestShortTableBits // Size of the table + bestShortLen = 4 // Bytes used for table hash + +) + +type match struct { + offset int32 + s int32 + length int32 + rep int32 + est int32 + _ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes +} + +const highScore = 25000 + +// estBits will estimate output bits from predefined tables. +func (m *match) estBits(bitsPerByte int32) { + mlc := mlCode(uint32(m.length - zstdMinMatch)) + var ofc uint8 + if m.rep < 0 { + ofc = ofCode(uint32(m.s-m.offset) + 3) + } else { + ofc = ofCode(uint32(m.rep)) + } + // Cost, excluding + ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] + + // Add cost of match encoding... + m.est = int32(ofTT.outBits + mlTT.outBits) + m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) + // Subtract savings compared to literal encoding... + m.est -= (m.length * bitsPerByte) >> 10 + if m.est > 0 { + // Unlikely gain.. + m.length = 0 + m.est = highScore + } +} + +// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type bestFastEncoder struct { + fastBase + table [bestShortTableSize]prevEntry + longTable [bestLongTableSize]prevEntry + dictTable []prevEntry + dictLongTable []prevEntry +} + +// Encode improves compression... +func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 4 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = prevEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + v2 := e.table[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.table[i] = prevEntry{ + offset: v, + prev: v2, + } + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Use this to estimate literal cost. + // Scaled by 10 bits. 
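+	// A value of 1024 thus corresponds to 1.0 bits per byte
+	// (fixed point with 10 fractional bits).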
+ bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) + // Huffman can never go < 1 bit/byte + if bitsPerByte < 1024 { + bitsPerByte = 1024 + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + const kSearchStrength = 10 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + offset3 := int32(blk.recentOffsets[2]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + _ = addLiterals + + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + bestOf := func(a, b match) match { + if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { + return a + } + return b + } + const goodEnough = 100 + + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + matchAt := func(offset int32, s int32, first uint32, rep int32) match { + if s-offset >= e.maxMatchOff || load3232(src, offset) != first { + return match{s: s, est: highScore} + } + if debugAsserts { + if !bytes.Equal(src[s:s+4], src[offset:offset+4]) { + panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) + } + } + m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep} + m.estBits(bitsPerByte) + return m + } + + best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + + if canRepeat && best.length < goodEnough { + cv32 := uint32(cv >> 8) + spp := s + 1 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + if best.length > 0 { + cv32 = uint32(cv >> 24) + spp += 2 + best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) + best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) + best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + } + } + // Load next and check... + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} + + // Look far ahead, unless we have a really long match already... + if best.length < goodEnough { + // No match found, move forward on input, no need to check forward... 
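+			// The skip size grows with the distance from the last emitted
+			// literal, so incompressible regions are scanned progressively faster.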
+ if best.length < 4 { + s += 1 + (s-nextEmit)>>(kSearchStrength-1) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + continue + } + + s++ + candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] + cv = load6432(src, s) + cv2 := load6432(src, s+1) + candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] + candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] + + // Short at s+1 + best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + // Long at s+1, s+2 + best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) + best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) + best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + if false { + // Short at s+3. + // Too often worse... + best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) + } + // See if we can find a better match by checking where the current best ends. + // Use that offset to see if we can find a better full match. + if sAt := best.s + best.length; sAt < sLimit { + nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) + candidateEnd := e.longTable[nextHashL] + if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { + bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) + if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { + bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) + } + best = bestEnd + } + } + } + + if debugAsserts { + if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { + panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) + } + } + + // We have a match, we can store the forward value + if best.rep > 0 { + s = best.s + var seq seq + seq.matchLen = uint32(best.length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := best.s + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + repIndex := best.offset + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = uint32(best.rep) + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + s = best.s + best.length + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, best.length) + + } + break encodeLoop + } + // Index skipped... + off := index0 + e.cur + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + off++ + index0++ + } + switch best.rep { + case 2: + offset1, offset2 = offset2, offset1 + case 3: + offset1, offset2, offset3 = offset3, offset1, offset2 + } + cv = load6432(src, s) + continue + } + + // A 4-byte match has been found. 
Update recent offsets. + // We'll later see if more than 4 bytes. + s = best.s + t := best.offset + offset1, offset2, offset3 = s-t, offset1, offset2 + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := best.length + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + // every entry + for index0 < s-1 { + cv0 := load6432(src, index0) + h0 := hashLen(cv0, bestLongTableBits, bestLongLen) + h1 := hashLen(cv0, bestShortTableBits, bestShortLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} + index0++ + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) + nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + blk.recentOffsets[2] = uint32(offset3) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. 
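+// In this implementation it simply pre-sizes the history buffer and delegates to Encode.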
+func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Reset will reset and set a dictionary if not nil +func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]prevEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = bestShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 + e.dictTable[nextHash] = prevEntry{ + prev: e.dictTable[nextHash].offset, + offset: i, + } + e.dictTable[nextHash1] = prevEntry{ + prev: e.dictTable[nextHash1].offset, + offset: i + 1, + } + e.dictTable[nextHash2] = prevEntry{ + prev: e.dictTable[nextHash2].offset, + offset: i + 2, + } + e.dictTable[nextHash3] = prevEntry{ + prev: e.dictTable[nextHash3].offset, + offset: i + 3, + } + } + e.lastDictID = d.id + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, bestLongTableBits, bestLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + } + // Reset table to initial state + copy(e.longTable[:], e.dictLongTable) + + e.cur = e.maxMatchOff + // Reset table to initial state + copy(e.table[:], e.dictTable) +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go new file mode 100644 index 00000000..d70e3fd3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -0,0 +1,1246 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + betterLongTableBits = 19 // Bits used in the long match table + betterLongTableSize = 1 << betterLongTableBits // Size of the table + betterLongLen = 8 // Bytes used for table hash + + // Note: Increasing the short table bits or making the hash shorter + // can actually lead to compression degradation since it will 'steal' more from the + // long match table and match offsets are quite big. + // This greatly depends on the type of input. 
+ betterShortTableBits = 13 // Bits used in the short match table + betterShortTableSize = 1 << betterShortTableBits // Size of the table + betterShortLen = 5 // Bytes used for table hash + + betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table + betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard + + betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table + betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard +) + +type prevEntry struct { + offset int32 + prev int32 +} + +// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. +// The long match table contains the previous entry with the same hash, +// effectively making it a "chain" of length 2. +// When we find a long match we choose between the two values and select the longest. +// When we find a short match, after checking the long, we check if we can find a long at n+1 +// and that it is longer (lazy matching). +type betterFastEncoder struct { + fastBase + table [betterShortTableSize]tableEntry + longTable [betterLongTableSize]prevEntry +} + +type betterFastEncoderDict struct { + betterFastEncoder + dictTable []tableEntry + dictLongTable []prevEntry + shortTableShardDirty [betterShortTableShardCnt]bool + longTableShardDirty [betterLongTableShardCnt]bool + allDirty bool +} + +// Encode improves compression... +func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) 
+ s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... 
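+				// Hash the skipped region back into both tables (long table at
+				// index0, short table at index0+1, stepping by 2) so matches
+				// inside it remain findable.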
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. 
+ matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + // Allow some bytes at the beginning to mismatch. + // Sweet spot is around 3 bytes, but depends on input. + // The skipped bytes are tested in Extend backwards, + // and still picked up as part of the match if they do. + const skipBeginning = 3 + + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + s2 := s + skipBeginning + cv := load3232(src, s2) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + skipBeginning + if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + s = s2 + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. + l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
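+			// The +3 on seq.offset below exists because encoded offset
+			// values 1-3 are reserved for the repeat offsets.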
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + e.ensureHist(len(src)) + e.Encode(blk, src) +} + +// Encode improves compression... +func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = prevEntry{} + } + e.cur = e.maxMatchOff + e.allDirty = true + break + } + // Shift down everything in the table that isn't already too far away. 
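+		// Rebase stored offsets so they stay valid after e.cur is pulled back
+		// to maxMatchOff; entries older than the match window become 0 (unset).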
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + v2 := e.longTable[i].prev + if v < minOff { + v = 0 + v2 = 0 + } else { + v = v - e.cur + e.maxMatchOff + if v2 < minOff { + v2 = 0 + } else { + v2 = v2 - e.cur + e.maxMatchOff + } + } + e.longTable[i] = prevEntry{ + offset: v, + prev: v2, + } + } + e.allDirty = true + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 9 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + var matched int32 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + off := s + e.cur + e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Index match start+1 (long) -> s - 1 + index0 := s + repOff + s += lenght + repOff + + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + // Index skipped... 
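+				// Same indexing as the non-dict encoder, but each touched shard
+				// is marked dirty so Reset can later refresh it from the dict tables.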
+ for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + continue + } + const repOff2 = 1 + + // We deviate from the reference encoder and also check offset 2. + // Still slower and not much better, so disabled. + // repIndex = s - offset2 + repOff2 + if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { + // Consider history as well. + var seq seq + lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 2 + seq.offset = 2 + if debugSequences { + println("repeat sequence 2", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + index0 := s + repOff2 + s += lenght + repOff2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + + // Index skipped... + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + cv = load6432(src, s) + // Swap offsets + offset1, offset2 = offset2, offset1 + continue + } + } + // Find the offsets of our two matches. + coffsetL := candidateL.offset - e.cur + coffsetLP := candidateL.prev - e.cur + + // Check if we have a long match. + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matched = e.matchlen(s+8, coffsetL+8, src) + 8 + t = coffsetL + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. + prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 + if prevMatch > matched { + matched = prevMatch + t = coffsetLP + } + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + } + break + } + + // Check if we have a long match on prev. + if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { + // Found a long match, at least 8 bytes. 
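+			// The full 8-byte load compared equal above, so matchlen can start
+			// counting from offset +8.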
+ matched = e.matchlen(s+8, coffsetLP+8, src) + 8 + t = coffsetLP + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + coffsetS := candidateS.offset - e.cur + + // Check if we have a short match. + if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + matched = e.matchlen(s+4, coffsetS+4, src) + 4 + + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = candidateL.offset - e.cur + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} + e.markLongShardDirty(nextHashL) + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("long match (after short)") + } + break + } + } + + // Check prev long... + coffsetL = candidateL.prev - e.cur + if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { + // Found a long match, at least 8 bytes. + matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 + if matchedNext > matched { + t = coffsetL + s += checkAt + matched = matchedNext + if debugMatches { + println("prev long match (after short)") + } + break + } + } + t = coffsetS + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // Try to find a better match by searching for a long match at the end of the current best match + if s+matched < sLimit { + nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) + cv := load3232(src, s) + candidateL := e.longTable[nextHashL] + coffsetL := candidateL.offset - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("long match at end-of-match") + } + } + } + + // Check prev long... + if true { + coffsetL = candidateL.prev - e.cur - matched + if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { + // Found a long match, at least 4 bytes. + matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 + if matchedNext > matched { + t = coffsetL + matched = matchedNext + if debugMatches { + println("prev long match at end-of-match") + } + } + } + } + } + // A match has been found. Update recent offsets. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the n-byte match as long as possible. 
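+		// Forward extension already happened inside matchlen; below the match
+		// is also grown backwards into the pending literals.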
+ l := matched + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) -> s - 1 + index0 := s - l + 1 + for index0 < s-1 { + cv0 := load6432(src, index0) + cv1 := cv0 >> 8 + h0 := hashLen(cv0, betterLongTableBits, betterLongLen) + off := index0 + e.cur + e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} + e.markLongShardDirty(h0) + h1 := hashLen(cv1, betterShortTableBits, betterShortLen) + e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} + e.markShortShardDirty(h1) + index0 += 2 + } + + cv = load6432(src, s) + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) + nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShortShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
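+		// extraLits counts the literals emitted after the final sequence.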
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("betterFastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff; i < end; i += 4 { + const hashLog = betterShortTableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 + nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 + nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 + nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + e.dictTable[nextHash3] = tableEntry{ + val: uint32(cv >> 24), + offset: i + 3, + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]prevEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: e.maxMatchOff, + prev: e.dictLongTable[h].offset, + } + + end := int32(len(d.content)) - 8 + e.maxMatchOff + off := 8 // First to read + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[off]) << 56) + h := hashLen(cv, betterLongTableBits, betterLongLen) + e.dictLongTable[h] = prevEntry{ + offset: i, + prev: e.dictLongTable[h].offset, + } + off++ + } + } + e.lastDictID = d.id + e.allDirty = true + } + + // Reset table to initial state + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterShortTableShardCnt + const shardSize = betterShortTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.table[:], e.dictTable) + for i := range e.shortTableShardDirty { + e.shortTableShardDirty[i] = false + } + } else { + for i := range e.shortTableShardDirty { + if !e.shortTableShardDirty[i] { + continue + } + + copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + e.shortTableShardDirty[i] = false + } + } + } + { + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.shortTableShardDirty { + if e.shortTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + const shardCnt = betterLongTableShardCnt + const shardSize = betterLongTableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + copy(e.longTable[:], e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + } else { + 
for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize]) + e.longTableShardDirty[i] = false + } + } + } + e.cur = e.maxMatchOff + e.allDirty = false +} + +func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/betterLongTableShardSize] = true +} + +func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) { + e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go new file mode 100644 index 00000000..1f4a9a24 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -0,0 +1,1127 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "fmt" + +const ( + dFastLongTableBits = 17 // Bits used in the long match table + dFastLongTableSize = 1 << dFastLongTableBits // Size of the table + dFastLongTableMask = dFastLongTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastLongLen = 8 // Bytes used for table hash + + dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table + dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + + dFastShortTableBits = tableBits // Bits used in the short match table + dFastShortTableSize = 1 << dFastShortTableBits // Size of the table + dFastShortTableMask = dFastShortTableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + dFastShortLen = 5 // Bytes used for table hash + +) + +type doubleFastEncoder struct { + fastEncoder + longTable [dFastLongTableSize]tableEntry +} + +type doubleFastEncoderDict struct { + fastEncoderDict + longTable [dFastLongTableSize]tableEntry + dictLongTable []tableEntry + longTableShardDirty [dLongTableShardCnt]bool +} + +// Encode mimmics functionality in zstd_dfast.c +func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. 
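+	// A stepSize of 1 probes every position; the shift-based skip in the
+	// search loop still accelerates through unmatchable data.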
+ const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. 
+ // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. 
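+			// Using repeat offset 2 promotes it to the most-recent slot,
+			// mirroring the zstd format's repeat-offset history update.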
+ offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + for { + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + + if len(blk.sequences) > 2 { + if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
+ startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + // Extend the 4-byte match as long as possible. + //l := e.matchlen(s+4, t+4, src) + 4 + l := int32(matchLen(src[s+4:], src[t+4:])) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 + e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 + e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 + + cv = load6432(src, s) + + if len(blk.sequences) <= 2 { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + //l := 4 + e.matchlen(s+4, o2+4, src) + l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.table[nextHashS] = entry + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + // Input margin is the number of bytes we read (8) + // and the maximum we will read ahead (2) + inputMargin = 8 + 2 + minNonLiteralBlockSize = 16 + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + for i := range e.longTable[:] { + e.longTable[i] = tableEntry{} + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. 
+ minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + for i := range e.longTable[:] { + v := e.longTable[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.longTable[i].offset = v + } + e.markAllShardsDirty() + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 1. + const stepSize = 1 + + const kSearchStrength = 8 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + var t int32 + // We allow the encoder to optionally turn off repeat offsets across blocks + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + candidateL := e.longTable[nextHashL] + candidateS := e.table[nextHashS] + + const repOff = 1 + repIndex := s - offset1 + repOff + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + + if canRepeat { + if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { + // Consider history as well. + var seq seq + lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + + seq.matchLen = uint32(lenght - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + repOff + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. + startLimit := nextEmit + 1 + + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += lenght + repOff + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, lenght) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + } + // Find the offsets of our two matches. + coffsetL := s - (candidateL.offset - e.cur) + coffsetS := s - (candidateS.offset - e.cur) + + // Check if we have a long match. + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
+ t = candidateL.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugMatches { + println("long match") + } + break + } + + // Check if we have a short match. + if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { + // found a regular match + // See if we can find a long match at s+1 + const checkAt = 1 + cv := load6432(src, s+checkAt) + nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) + candidateL = e.longTable[nextHashL] + coffsetL = s - (candidateL.offset - e.cur) + checkAt + + // We can store it, since we have at least a 4 byte match. + e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} + e.markLongShardDirty(nextHashL) + if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { + // Found a long match, likely at least 8 bytes. + // Reference encoder checks all 8 bytes, we only check 4, + // but the likelihood of both the first 4 bytes and the hash matching should be enough. + t = candidateL.offset - e.cur + s += checkAt + if debugMatches { + println("long match (after short)") + } + break + } + + t = candidateS.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + if debugMatches { + println("short match") + } + break + } + + // No match found, move forward in input. + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + + // A 4-byte match has been found. Update recent offsets. + // We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) 
+ } + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + + // Index match start+1 (long) and start+2 (short) + index0 := s - l + 1 + // Index match end-2 (long) and end-1 (short) + index1 := s - 2 + + cv0 := load6432(src, index0) + cv1 := load6432(src, index1) + te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} + te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} + longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) + longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) + e.longTable[longHash1] = te0 + e.longTable[longHash2] = te1 + e.markLongShardDirty(longHash1) + e.markLongShardDirty(longHash2) + cv0 >>= 8 + cv1 >>= 8 + te0.offset++ + te1.offset++ + te0.val = uint32(cv0) + te1.val = uint32(cv1) + hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) + hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) + e.table[hashVal1] = te0 + e.markShardDirty(hashVal1) + e.table[hashVal2] = te1 + e.markShardDirty(hashVal2) + + cv = load6432(src, s) + + if !canRepeat { + continue + } + + // Check offset 2 + for { + o2 := s - offset2 + if load3232(src, o2) != uint32(cv) { + // Do regular search + break + } + + // Store this, since we have it. + nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) + nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) + + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + entry := tableEntry{offset: s + e.cur, val: uint32(cv)} + e.longTable[nextHashL] = entry + e.markLongShardDirty(nextHashL) + e.table[nextHashS] = entry + e.markShardDirty(nextHashS) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + // Finished + break encodeLoop + } + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) + blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // If we encoded more than 64K mark all dirty. 
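+	// Rationale (assumed): past 64K of input most shards have been written
+	// anyway, so a full refresh on Reset beats per-shard tracking.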
+ if len(src) > 64<<10 { + e.markAllShardsDirty() + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { + e.fastEncoder.Reset(d, singleBlock) + if d != nil { + panic("doubleFastEncoder: Reset with dict not supported") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { + allDirty := e.allDirty + e.fastEncoderDict.Reset(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { + if len(e.dictLongTable) != len(e.longTable) { + e.dictLongTable = make([]tableEntry, len(e.longTable)) + } + if len(d.content) >= 8 { + cv := load6432(d.content, 0) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: e.maxMatchOff, + } + end := int32(len(d.content)) - 8 + e.maxMatchOff + for i := e.maxMatchOff + 1; i < end; i++ { + cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) + e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ + val: uint32(cv), + offset: i, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + // Reset table to initial state + e.cur = e.maxMatchOff + + dirtyShardCnt := 0 + if !allDirty { + for i := range e.longTableShardDirty { + if e.longTableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { + //copy(e.longTable[:], e.dictLongTable) + e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) + for i := range e.longTableShardDirty { + e.longTableShardDirty[i] = false + } + return + } + for i := range e.longTableShardDirty { + if !e.longTableShardDirty[i] { + continue + } + + // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) + *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) + + e.longTableShardDirty[i] = false + } +} + +func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { + e.longTableShardDirty[entryNum/dLongTableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go new file mode 100644 index 00000000..181edc02 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -0,0 +1,900 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" +) + +const ( + tableBits = 15 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table + tableShardSize = tableSize / tableShardCnt // Size of an individual shard + tableFastHashLen = 6 + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
+ maxMatchLength = 131074 +) + +type tableEntry struct { + val uint32 + offset int32 +} + +type fastEncoder struct { + fastBase + table [tableSize]tableEntry +} + +type fastEncoderDict struct { + fastEncoder + dictTable []tableEntry + tableShardDirty [tableShardCnt]bool + allDirty bool +} + +// Encode mimmics functionality in zstd_fast.c +func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + length = 4 + e.matchlen(s+6, repIndex+4, src) + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
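The wraparound block above rebases every stored offset when e.cur nears bufferReset: offsets shift down by (e.cur - maxMatchOff), matching e.cur being reset to maxMatchOff, and entries more than maxMatchOff behind the end of history are zeroed. A sketch of the arithmetic with illustrative constants:

```go
package main

import "fmt"

func main() {
	const maxMatchOff = int32(1 << 16)
	cur := int32(1 << 30)     // pretend e.cur grew very large
	histLen := int32(1 << 20) // bytes currently kept in e.hist
	minOff := cur + histLen - maxMatchOff

	for _, v := range []int32{cur + histLen - 10, minOff, minOff - 1} {
		if v < minOff {
			v = 0 // too far back to ever match again
		} else {
			v = v - cur + maxMatchOff // rebase against the new e.cur
		}
		fmt.Println(v)
	}
}
```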
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
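A standalone sketch of the backward extension used above: after a 4-byte match is found at positions (s, t), the loop grows it toward lower addresses while the preceding bytes also agree, trading literals for match length. The data and positions are invented:

```go
package main

import "fmt"

func main() {
	src := []byte("xxabcd____xxabcd")
	s, t, l := 12, 2, 4 // both point at "abcd"
	nextEmit := 0
	for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
		s--
		t--
		l++
	}
	fmt.Println(s, t, l) // 10 0 6: the match grew to "xxabcd"
}
```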
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// EncodeNoHist will encode a block with no history and no following blocks. +// Most notable difference is that src will not be copied for history and +// we do not need to check for max match length. +func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if debugEncoder { + if len(src) > maxBlockSize { + panic("src too big") + } + } + + // Protect against e.cur wraparound. + if e.cur >= bufferReset { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + } + + s := int32(0) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 6 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + + for { + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + + if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + length := 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
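One detail of the search loops above: a single 8-byte little-endian load at s also yields the value at s+1 as cv>>8, so two table entries are filled per iteration with just one load. A quick check that the low 7 bytes agree, which covers any hash of length at most 7 (tableFastHashLen is 6 here):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	src := []byte("0123456789")
	s := 1
	cv := binary.LittleEndian.Uint64(src[s:])    // load at s
	cv1 := binary.LittleEndian.Uint64(src[s+1:]) // load at s+1
	const mask = 1<<56 - 1                       // keep the low 7 bytes
	fmt.Printf("%014x\n%014x\n", (cv>>8)&mask, cv1&mask) // identical
}
```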
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && t < 0 { + panic(fmt.Sprintf("t (%d) < 0 ", t)) + } + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } + // We do not store history, so we must offset e.cur to avoid false matches for next user. + if e.cur < bufferReset { + e.cur += int32(len(src)) + } +} + +// Encode will encode the content, with a dictionary if initialized for it. +func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { + const ( + inputMargin = 8 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + if e.allDirty || len(src) > 32<<10 { + e.fastEncoder.Encode(blk, src) + e.allDirty = true + return + } + // Protect against e.cur wraparound. + for e.cur >= bufferReset { + if len(e.hist) == 0 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = e.maxMatchOff + break + } + // Shift down everything in the table that isn't already too far away. + minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff + for i := range e.table[:] { + v := e.table[i].offset + if v < minOff { + v = 0 + } else { + v = v - e.cur + e.maxMatchOff + } + e.table[i].offset = v + } + e.cur = e.maxMatchOff + break + } + + s := e.addBlock(src) + blk.size = len(src) + if len(src) < minNonLiteralBlockSize { + blk.extraLits = len(src) + blk.literals = blk.literals[:len(src)] + copy(blk.literals, src) + return + } + + // Override src + src = e.hist + sLimit := int32(len(src)) - inputMargin + // stepSize is the number of bytes to skip on every main loop iteration. + // It should be >= 2. + const stepSize = 2 + + // TEMPLATE + const hashLog = tableBits + // seems global, but would be nice to tweak. + const kSearchStrength = 7 + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := s + cv := load6432(src, s) + + // Relative offsets + offset1 := int32(blk.recentOffsets[0]) + offset2 := int32(blk.recentOffsets[1]) + + addLiterals := func(s *seq, until int32) { + if until == nextEmit { + return + } + blk.literals = append(blk.literals, src[nextEmit:until]...) + s.litLen = uint32(until - nextEmit) + } + if debugEncoder { + println("recent offsets:", blk.recentOffsets) + } + +encodeLoop: + for { + // t will contain the match offset when we find one. + // When existing the search loop, we have already checked 4 bytes. + var t int32 + + // We will not use repeat offsets across blocks. + // By not using them for the first 3 matches + canRepeat := len(blk.sequences) > 2 + + for { + if debugAsserts && canRepeat && offset1 == 0 { + panic("offset0 was 0") + } + + nextHash := hashLen(cv, hashLog, tableFastHashLen) + nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen) + candidate := e.table[nextHash] + candidate2 := e.table[nextHash2] + repIndex := s - offset1 + 2 + + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)} + e.markShardDirty(nextHash2) + + if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) { + // Consider history as well. + var seq seq + var length int32 + length = 4 + e.matchlen(s+6, repIndex+4, src) + + seq.matchLen = uint32(length - zstdMinMatch) + + // We might be able to match backwards. + // Extend as long as we can. + start := s + 2 + // We end the search early, so we don't risk 0 literals + // and have to do special offset treatment. 
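The dict variant above records which table shards Encode wrote so that Reset can restore only those shards from the dictionary snapshot instead of copying the whole table; markShardDirty maps an entry index to its shard by integer division. A sketch using this file's table constants; dictShardBits = 6 is an assumption for the sketch (the real constant is defined elsewhere in the package):

```go
package main

import "fmt"

const (
	tableBits      = 15
	tableSize      = 1 << tableBits
	dictShardBits  = 6 // assumption for this sketch
	tableShardCnt  = 1 << (tableBits - dictShardBits)
	tableShardSize = tableSize / tableShardCnt
)

func main() {
	var dirty [tableShardCnt]bool
	for _, entry := range []uint32{0, tableShardSize - 1, tableShardSize, tableSize - 1} {
		shard := entry / tableShardSize
		dirty[shard] = true
		fmt.Println("entry", entry, "-> shard", shard)
	}
	// On Reset, only shards with dirty[i] set need restoring from the
	// dictionary snapshot; a mostly-clean table avoids a full copy.
}
```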
+ startLimit := nextEmit + 1 + + sMin := s - e.maxMatchOff + if sMin < 0 { + sMin = 0 + } + for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { + repIndex-- + start-- + seq.matchLen++ + } + addLiterals(&seq, start) + + // rep 0 + seq.offset = 1 + if debugSequences { + println("repeat sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + s += length + 2 + nextEmit = s + if s >= sLimit { + if debugEncoder { + println("repeat ended", s, length) + + } + break encodeLoop + } + cv = load6432(src, s) + continue + } + coffset0 := s - (candidate.offset - e.cur) + coffset1 := s - (candidate2.offset - e.cur) + 1 + if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { + // found a regular match + t = candidate.offset - e.cur + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + break + } + + if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { + // found a regular match + t = candidate2.offset - e.cur + s++ + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + if debugAsserts && s-t > e.maxMatchOff { + panic("s - t >e.maxMatchOff") + } + if debugAsserts && t < 0 { + panic("t<0") + } + break + } + s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + } + // A 4-byte match has been found. We'll later see if more than 4 bytes. + offset2 = offset1 + offset1 = s - t + + if debugAsserts && s <= t { + panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) + } + + if debugAsserts && canRepeat && int(offset1) > len(src) { + panic("invalid offset") + } + + // Extend the 4-byte match as long as possible. + l := e.matchlen(s+4, t+4, src) + 4 + + // Extend backwards + tMin := s - e.maxMatchOff + if tMin < 0 { + tMin = 0 + } + for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { + s-- + t-- + l++ + } + + // Write our sequence. + var seq seq + seq.litLen = uint32(s - nextEmit) + seq.matchLen = uint32(l - zstdMinMatch) + if seq.litLen > 0 { + blk.literals = append(blk.literals, src[nextEmit:s]...) + } + // Don't use repeat offsets + seq.offset = uint32(s-t) + 3 + s += l + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + nextEmit = s + if s >= sLimit { + break encodeLoop + } + cv = load6432(src, s) + + // Check offset 2 + if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { + // We have at least 4 byte match. + // No need to check backwards. We come straight from a match + l := 4 + e.matchlen(s+4, o2+4, src) + + // Store this, since we have it. + nextHash := hashLen(cv, hashLog, tableFastHashLen) + e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} + e.markShardDirty(nextHash) + seq.matchLen = uint32(l) - zstdMinMatch + seq.litLen = 0 + // Since litlen is always 0, this is offset 1. + seq.offset = 1 + s += l + nextEmit = s + if debugSequences { + println("sequence", seq, "next s:", s) + } + blk.sequences = append(blk.sequences, seq) + + // Swap offset 1 and 2. + offset1, offset2 = offset2, offset1 + if s >= sLimit { + break encodeLoop + } + // Prepare next loop. + cv = load6432(src, s) + } + } + + if int(nextEmit) < len(src) { + blk.literals = append(blk.literals, src[nextEmit:]...) 
+ blk.extraLits = len(src) - int(nextEmit) + } + blk.recentOffsets[0] = uint32(offset1) + blk.recentOffsets[1] = uint32(offset2) + if debugEncoder { + println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoder) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d != nil { + panic("fastEncoder: Reset with dict") + } +} + +// ResetDict will reset and set a dictionary if not nil +func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { + e.resetBase(d, singleBlock) + if d == nil { + return + } + + // Init or copy dict table + if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { + if len(e.dictTable) != len(e.table) { + e.dictTable = make([]tableEntry, len(e.table)) + } + if true { + end := e.maxMatchOff + int32(len(d.content)) - 8 + for i := e.maxMatchOff; i < end; i += 3 { + const hashLog = tableBits + + cv := load6432(d.content, i-e.maxMatchOff) + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 + nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 + e.dictTable[nextHash] = tableEntry{ + val: uint32(cv), + offset: i, + } + e.dictTable[nextHash1] = tableEntry{ + val: uint32(cv >> 8), + offset: i + 1, + } + e.dictTable[nextHash2] = tableEntry{ + val: uint32(cv >> 16), + offset: i + 2, + } + } + } + e.lastDictID = d.id + e.allDirty = true + } + + e.cur = e.maxMatchOff + dirtyShardCnt := 0 + if !e.allDirty { + for i := range e.tableShardDirty { + if e.tableShardDirty[i] { + dirtyShardCnt++ + } + } + } + + const shardCnt = tableShardCnt + const shardSize = tableShardSize + if e.allDirty || dirtyShardCnt > shardCnt*4/6 { + //copy(e.table[:], e.dictTable) + e.table = *(*[tableSize]tableEntry)(e.dictTable) + for i := range e.tableShardDirty { + e.tableShardDirty[i] = false + } + e.allDirty = false + return + } + for i := range e.tableShardDirty { + if !e.tableShardDirty[i] { + continue + } + + //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) + *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) + e.tableShardDirty[i] = false + } + e.allDirty = false +} + +func (e *fastEncoderDict) markAllShardsDirty() { + e.allDirty = true +} + +func (e *fastEncoderDict) markShardDirty(entryNum uint32) { + e.tableShardDirty[entryNum/tableShardSize] = true +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go new file mode 100644 index 00000000..7aaaedb2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -0,0 +1,641 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "crypto/rand" + "fmt" + "io" + rdebug "runtime/debug" + "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +// Encoder provides encoding to Zstandard. +// An Encoder can be used for either compressing a stream via the +// io.WriteCloser interface supported by the Encoder or as multiple independent +// tasks via the EncodeAll function. +// Smaller encodes are encouraged to use the EncodeAll function. +// Use NewWriter to create a new instance. 
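A minimal streaming-usage sketch of the Encoder API this file introduces, using only the exported functions with default options:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf) // streaming encoder, default options
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("hello, zstd")); err != nil {
		log.Fatal(err)
	}
	// Close flushes buffered blocks and appends the frame CRC (on by default).
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("compressed size:", buf.Len())
}
```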
+type Encoder struct { + o encoderOptions + encoders chan encoder + state encoderState + init sync.Once +} + +type encoder interface { + Encode(blk *blockEnc, src []byte) + EncodeNoHist(blk *blockEnc, src []byte) + Block() *blockEnc + CRC() *xxhash.Digest + AppendCRC([]byte) []byte + WindowSize(size int64) int32 + UseBlock(*blockEnc) + Reset(d *dict, singleBlock bool) +} + +type encoderState struct { + w io.Writer + filling []byte + current []byte + previous []byte + encoder encoder + writing *blockEnc + err error + writeErr error + nWritten int64 + nInput int64 + frameContentSize int64 + headerWritten bool + eofWritten bool + fullFrameWritten bool + + // This waitgroup indicates an encode is running. + wg sync.WaitGroup + // This waitgroup indicates we have a block encoding/writing. + wWg sync.WaitGroup +} + +// NewWriter will create a new Zstandard encoder. +// If the encoder will be used for encoding blocks a nil writer can be used. +func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { + initPredefined() + var e Encoder + e.o.setDefault() + for _, o := range opts { + err := o(&e.o) + if err != nil { + return nil, err + } + } + if w != nil { + e.Reset(w) + } + return &e, nil +} + +func (e *Encoder) initialize() { + if e.o.concurrent == 0 { + e.o.setDefault() + } + e.encoders = make(chan encoder, e.o.concurrent) + for i := 0; i < e.o.concurrent; i++ { + enc := e.o.encoder() + e.encoders <- enc + } +} + +// Reset will re-initialize the writer and new writes will encode to the supplied writer +// as a new, independent stream. +func (e *Encoder) Reset(w io.Writer) { + s := &e.state + s.wg.Wait() + s.wWg.Wait() + if cap(s.filling) == 0 { + s.filling = make([]byte, 0, e.o.blockSize) + } + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() + } + if s.encoder == nil { + s.encoder = e.o.encoder() + } + s.filling = s.filling[:0] + s.encoder.Reset(e.o.dict, false) + s.headerWritten = false + s.eofWritten = false + s.fullFrameWritten = false + s.w = w + s.err = nil + s.nWritten = 0 + s.nInput = 0 + s.writeErr = nil + s.frameContentSize = 0 +} + +// ResetContentSize will reset and set a content size for the next stream. +// If the bytes written does not match the size given an error will be returned +// when calling Close(). +// This is removed when Reset is called. +// Sizes <= 0 results in no content size set. +func (e *Encoder) ResetContentSize(w io.Writer, size int64) { + e.Reset(w) + if size >= 0 { + e.state.frameContentSize = size + } +} + +// Write data to the encoder. +// Input data will be buffered and as the buffer fills up +// content will be compressed and written to the output. +// When done writing, use Close to flush the remaining output +// and write CRC if requested. +func (e *Encoder) Write(p []byte) (n int, err error) { + s := &e.state + for len(p) > 0 { + if len(p)+len(s.filling) < e.o.blockSize { + if e.o.crc { + _, _ = s.encoder.CRC().Write(p) + } + s.filling = append(s.filling, p...) + return n + len(p), nil + } + add := p + if len(p)+len(s.filling) > e.o.blockSize { + add = add[:e.o.blockSize-len(s.filling)] + } + if e.o.crc { + _, _ = s.encoder.CRC().Write(add) + } + s.filling = append(s.filling, add...) 
+ p = p[len(add):] + n += len(add) + if len(s.filling) < e.o.blockSize { + return n, nil + } + err := e.nextBlock(false) + if err != nil { + return n, err + } + if debugAsserts && len(s.filling) > 0 { + panic(len(s.filling)) + } + } + return n, nil +} + +// nextBlock will synchronize and start compressing input in e.state.filling. +// If an error has occurred during encoding it will be returned. +func (e *Encoder) nextBlock(final bool) error { + s := &e.state + // Wait for current block. + s.wg.Wait() + if s.err != nil { + return s.err + } + if len(s.filling) > e.o.blockSize { + return fmt.Errorf("block > maxStoreBlockSize") + } + if !s.headerWritten { + // If we have a single block encode, do a sync compression. + if final && len(s.filling) == 0 && !e.o.fullZero { + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + if final && len(s.filling) > 0 { + s.current = e.EncodeAll(s.filling, s.current[:0]) + var n2 int + n2, s.err = s.w.Write(s.current) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + s.nInput += int64(len(s.filling)) + s.current = s.current[:0] + s.filling = s.filling[:0] + s.headerWritten = true + s.fullFrameWritten = true + s.eofWritten = true + return nil + } + + var tmp [maxHeaderSize]byte + fh := frameHeader{ + ContentSize: uint64(s.frameContentSize), + WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), + SingleSegment: false, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + dst, err := fh.appendTo(tmp[:0]) + if err != nil { + return err + } + s.headerWritten = true + s.wWg.Wait() + var n2 int + n2, s.err = s.w.Write(dst) + if s.err != nil { + return s.err + } + s.nWritten += int64(n2) + } + if s.eofWritten { + // Ensure we only write it once. + final = false + } + + if len(s.filling) == 0 { + // Final block, but no data. + if final { + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + blk.last = true + blk.encodeRaw(nil) + s.wWg.Wait() + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.eofWritten = true + } + return s.err + } + + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + + // Move blocks forward. 
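Since Write above only queues bytes until a full block accumulates, a caller that needs output on the wire early must call Flush, which drives nextBlock and waits for the pending encode and write. A sketch:

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := enc.Write([]byte("short write, well below the block size")); err != nil {
		log.Fatal(err)
	}
	// Nothing may have reached buf yet; the bytes can still sit in s.filling.
	if err := enc.Flush(); err != nil { // forces a block out and waits
		log.Fatal(err)
	}
	// buf now contains the frame header and the compressed block.
	_ = enc.Close()
}
```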
+ s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current + s.nInput += int64(len(s.current)) + s.wg.Add(1) + go func(src []byte) { + if debugEncoder { + println("Adding block,", len(src), "bytes, final:", final) + } + defer func() { + if r := recover(); r != nil { + s.err = fmt.Errorf("panic while encoding: %v", r) + rdebug.PrintStack() + } + s.wg.Done() + }() + enc := s.encoder + blk := enc.Block() + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + // Wait for pending writes. + s.wWg.Wait() + if s.writeErr != nil { + s.err = s.writeErr + return + } + // Transfer encoders from previous write block. + blk.swapEncoders(s.writing) + // Transfer recent offsets to next. + enc.UseBlock(s.writing) + s.writing = blk + s.wWg.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) + rdebug.PrintStack() + } + s.wWg.Done() + }() + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.writeErr = err + return + } + _, s.writeErr = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + }() + }(s.current) + return nil +} + +// ReadFrom reads data from r until EOF or error. +// The return value n is the number of bytes read. +// Any error except io.EOF encountered during the read is also returned. +// +// The Copy function uses ReaderFrom if available. +func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { + if debugEncoder { + println("Using ReadFrom") + } + + // Flush any current writes. + if len(e.state.filling) > 0 { + if err := e.nextBlock(false); err != nil { + return 0, err + } + } + e.state.filling = e.state.filling[:e.o.blockSize] + src := e.state.filling + for { + n2, err := r.Read(src) + if e.o.crc { + _, _ = e.state.encoder.CRC().Write(src[:n2]) + } + // src is now the unfilled part... + src = src[n2:] + n += int64(n2) + switch err { + case io.EOF: + e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] + if debugEncoder { + println("ReadFrom: got EOF final block:", len(e.state.filling)) + } + return n, nil + case nil: + default: + if debugEncoder { + println("ReadFrom: got error:", err) + } + e.state.err = err + return n, err + } + if len(src) > 0 { + if debugEncoder { + println("ReadFrom: got space left in source:", len(src)) + } + continue + } + err = e.nextBlock(false) + if err != nil { + return n, err + } + e.state.filling = e.state.filling[:e.o.blockSize] + src = e.state.filling + } +} + +// Flush will send the currently written data to output +// and block until everything has been written. +// This should only be used on rare occasions where pushing the currently queued data is critical. +func (e *Encoder) Flush() error { + s := &e.state + if len(s.filling) > 0 { + err := e.nextBlock(false) + if err != nil { + return err + } + } + s.wg.Wait() + s.wWg.Wait() + if s.err != nil { + return s.err + } + return s.writeErr +} + +// Close will flush the final output and close the stream. +// The function will block until everything has been written. 
+// The Encoder can still be re-used after calling this. +func (e *Encoder) Close() error { + s := &e.state + if s.encoder == nil { + return nil + } + err := e.nextBlock(true) + if err != nil { + return err + } + if s.frameContentSize > 0 { + if s.nInput != s.frameContentSize { + return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) + } + } + if e.state.fullFrameWritten { + return s.err + } + s.wg.Wait() + s.wWg.Wait() + + if s.err != nil { + return s.err + } + if s.writeErr != nil { + return s.writeErr + } + + // Write CRC + if e.o.crc && s.err == nil { + // heap alloc. + var tmp [4]byte + _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) + s.nWritten += 4 + } + + // Add padding with content from crypto/rand.Reader + if s.err == nil && e.o.pad > 0 { + add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) + frame, err := skippableFrame(s.filling[:0], add, rand.Reader) + if err != nil { + return err + } + _, s.err = s.w.Write(frame) + } + return s.err +} + +// EncodeAll will encode all input in src and append it to dst. +// This function can be called concurrently, but each call will only run on a single goroutine. +// If empty input is given, nothing is returned, unless WithZeroFrames is specified. +// Encoded blocks can be concatenated and the result will be the combined input stream. +// Data compressed with EncodeAll can be decoded with the Decoder, +// using either a stream or DecodeAll. +func (e *Encoder) EncodeAll(src, dst []byte) []byte { + if len(src) == 0 { + if e.o.fullZero { + // Add frame header. + fh := frameHeader{ + ContentSize: 0, + WindowSize: MinWindowSize, + SingleSegment: true, + // Adding a checksum would be a waste of space. + Checksum: false, + DictID: 0, + } + dst, _ = fh.appendTo(dst) + + // Write raw block as last one only. + var blk blockHeader + blk.setSize(0) + blk.setType(blockTypeRaw) + blk.setLast(true) + dst = blk.appendTo(dst) + } + return dst + } + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + // Release encoder reference to last block. + // If a non-single block is needed the encoder will reset again. + e.encoders <- enc + }() + // Use single segments when above minimum window and below window size. + single := len(src) <= e.o.windowSize && len(src) > MinWindowSize + if e.o.single != nil { + single = *e.o.single + } + fh := frameHeader{ + ContentSize: uint64(len(src)), + WindowSize: uint32(enc.WindowSize(int64(len(src)))), + SingleSegment: single, + Checksum: e.o.crc, + DictID: e.o.dict.ID(), + } + + // If less than 1MB, allocate a buffer up front. + if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { + dst = make([]byte, 0, len(src)) + } + dst, err := fh.appendTo(dst) + if err != nil { + panic(err) + } + + // If we can do everything in one block, prefer that. + if len(src) <= e.o.blockSize { + enc.Reset(e.o.dict, true) + // Slightly faster with no history and everything in one block. + if e.o.crc { + _, _ = enc.CRC().Write(src) + } + blk := enc.Block() + blk.last = true + if e.o.dict == nil { + enc.EncodeNoHist(blk, src) + } else { + enc.Encode(blk, src) + } + + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. 
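A minimal sketch of the EncodeAll path above; per the NewWriter documentation, a nil writer is enough when the Encoder is used purely for block encodes:

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	payload := []byte("EncodeAll produces one self-contained frame per call")
	frame := enc.EncodeAll(payload, nil) // appends to dst; nil allocates
	fmt.Println(len(payload), "->", len(frame))
}
```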
+ err := errIncompressible + oldout := blk.output + if len(blk.literals) != len(src) || len(src) != e.o.blockSize { + // Output directly to dst + blk.output = dst + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, src) + case nil: + dst = blk.output + default: + panic(err) + } + blk.output = oldout + } else { + enc.Reset(e.o.dict, false) + blk := enc.Block() + for len(src) > 0 { + todo := src + if len(todo) > e.o.blockSize { + todo = todo[:e.o.blockSize] + } + src = src[len(todo):] + if e.o.crc { + _, _ = enc.CRC().Write(todo) + } + blk.pushOffsets() + enc.Encode(blk, todo) + if len(src) == 0 { + blk.last = true + } + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize { + err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) + } + + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + dst = blk.encodeRawTo(dst, todo) + blk.popOffsets() + case nil: + dst = append(dst, blk.output...) + default: + panic(err) + } + blk.reset(nil) + } + } + if e.o.crc { + dst = enc.AppendCRC(dst) + } + // Add padding with content from crypto/rand.Reader + if e.o.pad > 0 { + add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) + dst, err = skippableFrame(dst, add, rand.Reader) + if err != nil { + panic(err) + } + } + return dst +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go new file mode 100644 index 00000000..a7c5e1aa --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -0,0 +1,317 @@ +package zstd + +import ( + "errors" + "fmt" + "runtime" + "strings" +) + +// EOption is an option for creating a encoder. +type EOption func(*encoderOptions) error + +// options retains accumulated state of multiple options. +type encoderOptions struct { + concurrent int + level EncoderLevel + single *bool + pad int + blockSize int + windowSize int + crc bool + fullZero bool + noEntropy bool + allLitEntropy bool + customWindow bool + customALEntropy bool + customBlockSize bool + lowMem bool + dict *dict +} + +func (o *encoderOptions) setDefault() { + *o = encoderOptions{ + concurrent: runtime.GOMAXPROCS(0), + crc: true, + single: nil, + blockSize: maxCompressedBlockSize, + windowSize: 8 << 20, + level: SpeedDefault, + allLitEntropy: true, + lowMem: false, + } +} + +// encoder returns an encoder with the selected options. 
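The options above are plain functional options applied in order over setDefault. A sketch of constructing an encoder with a few of them; the particular values are chosen only for illustration:

```go
package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf,
		zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
		zstd.WithWindowSize(1<<20), // power of two within the allowed range
		zstd.WithEncoderCRC(false),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer enc.Close()
	if _, err := enc.Write([]byte("configured encoder")); err != nil {
		log.Fatal(err)
	}
}
```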
+func (o encoderOptions) encoder() encoder { + switch o.level { + case SpeedFastest: + if o.dict != nil { + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + + case SpeedDefault: + if o.dict != nil { + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} + } + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + case SpeedBetterCompression: + if o.dict != nil { + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + } + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + case SpeedBestCompression: + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + } + panic("unknown compression level") +} + +// WithEncoderCRC will add CRC value to output. +// Output will be 4 bytes larger. +func WithEncoderCRC(b bool) EOption { + return func(o *encoderOptions) error { o.crc = b; return nil } +} + +// WithEncoderConcurrency will set the concurrency, +// meaning the maximum number of encoders to run concurrently. +// The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. +// By default this will be set to GOMAXPROCS. +func WithEncoderConcurrency(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("concurrency must be at least 1") + } + o.concurrent = n + return nil + } +} + +// WithWindowSize will set the maximum allowed back-reference distance. +// The value must be a power of two between MinWindowSize and MaxWindowSize. +// A larger value will enable better compression but allocate more memory and, +// for above-default values, take considerably longer. +// The default value is determined by the compression level. +func WithWindowSize(n int) EOption { + return func(o *encoderOptions) error { + switch { + case n < MinWindowSize: + return fmt.Errorf("window size must be at least %d", MinWindowSize) + case n > MaxWindowSize: + return fmt.Errorf("window size must be at most %d", MaxWindowSize) + case (n & (n - 1)) != 0: + return errors.New("window size must be a power of 2") + } + + o.windowSize = n + o.customWindow = true + if o.blockSize > o.windowSize { + o.blockSize = o.windowSize + o.customBlockSize = true + } + return nil + } +} + +// WithEncoderPadding will add padding to all output so the size will be a multiple of n. +// This can be used to obfuscate the exact output size or make blocks of a certain size. +// The contents will be a skippable frame, so it will be invisible by the decoder. +// n must be > 0 and <= 1GB, 1<<30 bytes. +// The padded area will be filled with data from crypto/rand.Reader. +// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. +func WithEncoderPadding(n int) EOption { + return func(o *encoderOptions) error { + if n <= 0 { + return fmt.Errorf("padding must be at least 1") + } + // No need to waste our time. 
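A small usage sketch of WithEncoderPadding as documented above; with an empty destination, the frame plus the skippable padding frame comes out as a multiple of the pad value:

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderPadding(512))
	if err != nil {
		log.Fatal(err)
	}
	out := enc.EncodeAll([]byte("pad me"), nil)
	// The skippable padding frame rounds the output up to a multiple of 512.
	fmt.Println(len(out), len(out)%512) // second value is 0
}
```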
+ if n == 1 {
+ o.pad = 0
+ }
+ if n > 1<<30 {
+ return fmt.Errorf("padding must be less than 1GB (1<<30 bytes)")
+ }
+ o.pad = n
+ return nil
+ }
+}
+
+// EncoderLevel predefines encoder compression levels.
+// Only use the constants made available, since the actual mapping
+// of these values is very likely to change and your compression could change
+// unpredictably when upgrading the library.
+type EncoderLevel int
+
+const (
+ speedNotSet EncoderLevel = iota
+
+ // SpeedFastest will choose the fastest reasonable compression.
+ // This is roughly equivalent to the fastest Zstandard mode.
+ SpeedFastest
+
+ // SpeedDefault is the default "pretty fast" compression option.
+ // This is roughly equivalent to the default Zstandard mode (level 3).
+ SpeedDefault
+
+ // SpeedBetterCompression will yield better compression than the default.
+ // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage.
+ // Note that CPU usage may increase further in future versions.
+ SpeedBetterCompression
+
+ // SpeedBestCompression will choose the best available compression option.
+ // This will offer the best compression no matter the CPU cost.
+ SpeedBestCompression
+
+ // speedLast should be kept as the last actual compression option.
+ // This is not for external usage, but is used to keep track of the valid options.
+ speedLast
+)
+
+// EncoderLevelFromString will convert a string representation of an encoding level back
+// to a compression level. The comparison is case-insensitive.
+// If the string wasn't recognized, (false, SpeedDefault) will be returned.
+func EncoderLevelFromString(s string) (bool, EncoderLevel) {
+ for l := speedNotSet + 1; l < speedLast; l++ {
+ if strings.EqualFold(s, l.String()) {
+ return true, l
+ }
+ }
+ return false, SpeedDefault
+}
+
+// EncoderLevelFromZstd will return the encoder level that most closely matches
+// the compression ratio of a specific zstd compression level.
+// Many input values will map to the same compression level.
+func EncoderLevelFromZstd(level int) EncoderLevel {
+ switch {
+ case level < 3:
+ return SpeedFastest
+ case level >= 3 && level < 6:
+ return SpeedDefault
+ case level >= 6 && level < 10:
+ return SpeedBetterCompression
+ default:
+ return SpeedBestCompression
+ }
+}
+
+// String provides a string representation of the compression level.
+func (e EncoderLevel) String() string {
+ switch e {
+ case SpeedFastest:
+ return "fastest"
+ case SpeedDefault:
+ return "default"
+ case SpeedBetterCompression:
+ return "better"
+ case SpeedBestCompression:
+ return "best"
+ default:
+ return "invalid"
+ }
+}
+
+// WithEncoderLevel specifies a predefined compression level.
+func WithEncoderLevel(l EncoderLevel) EOption {
+ return func(o *encoderOptions) error {
+ switch {
+ case l <= speedNotSet || l >= speedLast:
+ return fmt.Errorf("unknown encoder level")
+ }
+ o.level = l
+ if !o.customWindow {
+ switch o.level {
+ case SpeedFastest:
+ o.windowSize = 4 << 20
+ if !o.customBlockSize {
+ o.blockSize = 1 << 16
+ }
+ case SpeedDefault:
+ o.windowSize = 8 << 20
+ case SpeedBetterCompression:
+ o.windowSize = 16 << 20
+ case SpeedBestCompression:
+ o.windowSize = 32 << 20
+ }
+ }
+ if !o.customALEntropy {
+ o.allLitEntropy = l > SpeedFastest
+ }
+
+ return nil
+ }
+}
+
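A short sketch exercising the level helpers defined above:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Map familiar zstd CLI levels onto the four coarse levels above.
	for _, l := range []int{1, 3, 7, 19} {
		fmt.Println(l, "->", zstd.EncoderLevelFromZstd(l))
	}
	// And back from the string form; the comparison is case-insensitive.
	ok, lvl := zstd.EncoderLevelFromString("BEST")
	fmt.Println(ok, lvl) // true best
}
```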
+// WithZeroFrames will encode 0 length input as full frames.
+// This can be needed for compatibility with zstandard usage,
+// but is not needed for this package.
+func WithZeroFrames(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.fullZero = b
+ return nil
+ }
+}
+
+// WithAllLitEntropyCompression will apply entropy compression if no matches are found.
+// Disabling this will skip incompressible data faster, but for input with no matches
+// and a skewed character distribution, compression is lost.
+// The default value depends on the compression level selected.
+func WithAllLitEntropyCompression(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.customALEntropy = true
+ o.allLitEntropy = b
+ return nil
+ }
+}
+
+// WithNoEntropyCompression will always skip entropy compression of literals.
+// This can be useful if content has matches, but is unlikely to benefit from entropy
+// compression. Usually the slight speed improvement is not worth enabling this.
+func WithNoEntropyCompression(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.noEntropy = b
+ return nil
+ }
+}
+
+// WithSingleSegment will set the "single segment" flag when EncodeAll is used.
+// If this flag is set, data must be regenerated within a single continuous memory segment.
+// In this case, the Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present.
+// As a consequence, the decoder must allocate a memory segment equal to or larger than the size of your content.
+// In order to protect the decoder from unreasonable memory requirements,
+// a decoder is allowed to reject a compressed frame which requests a memory size beyond the decoder's authorized range.
+// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
+// This is only a recommendation; each decoder is free to support higher or lower limits, depending on local limitations.
+// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
+// This setting has no effect on streamed encodes.
+func WithSingleSegment(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.single = &b
+ return nil
+ }
+}
+
+// WithLowerEncoderMem will, in some cases, trade lower memory usage for
+// slower encoding speed.
+// This will not change the window size, which is the primary means of reducing
+// memory usage. See WithWindowSize.
+func WithLowerEncoderMem(b bool) EOption {
+ return func(o *encoderOptions) error {
+ o.lowMem = b
+ return nil
+ }
+}
+
+// WithEncoderDict allows registering a dictionary that will be used for the encode.
+// The encoder *may* choose to use no dictionary instead for certain payloads.
+func WithEncoderDict(dict []byte) EOption {
+ return func(o *encoderOptions) error {
+ d, err := loadDict(dict)
+ if err != nil {
+ return err
+ }
+ o.dict = d
+ return nil
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
new file mode 100644
index 00000000..b6c50541
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -0,0 +1,436 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
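With the encoder options in place, framedec.go below adds the frame decoder. A round-trip sketch pairing EncodeAll with the decoder's DecodeAll; nil writer and nil reader are the documented patterns for block-only usage:

```go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		log.Fatal(err)
	}
	dec, err := zstd.NewReader(nil) // nil reader: DecodeAll-only usage
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	frame := enc.EncodeAll([]byte("round trip"), nil)
	plain, err := dec.DecodeAll(frame, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plain))
}
```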
+ +package zstd + +import ( + "bytes" + "encoding/hex" + "errors" + "io" + + "github.com/klauspost/compress/zstd/internal/xxhash" +) + +type frameDec struct { + o decoderOptions + crc *xxhash.Digest + + WindowSize uint64 + + // Frame history passed between blocks + history history + + rawInput byteBuffer + + // Byte buffer that can be reused for small input blocks. + bBuf byteBuf + + FrameContentSize uint64 + + DictionaryID *uint32 + HasCheckSum bool + SingleSegment bool +} + +const ( + // MinWindowSize is the minimum Window Size, which is 1 KB. + MinWindowSize = 1 << 10 + + // MaxWindowSize is the maximum encoder window size + // and the default decoder maximum window size. + MaxWindowSize = 1 << 29 +) + +var ( + frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} + skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +) + +func newFrameDec(o decoderOptions) *frameDec { + if o.maxWindowSize > o.maxDecodedSize { + o.maxWindowSize = o.maxDecodedSize + } + d := frameDec{ + o: o, + } + return &d +} + +// reset will read the frame header and prepare for block decoding. +// If nothing can be read from the input, io.EOF will be returned. +// Any other error indicated that the stream contained data, but +// there was a problem. +func (d *frameDec) reset(br byteBuffer) error { + d.HasCheckSum = false + d.WindowSize = 0 + var signature [4]byte + for { + var err error + // Check if we can read more... + b, err := br.readSmall(1) + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return io.EOF + default: + return err + case nil: + signature[0] = b[0] + } + // Read the rest, don't allow io.ErrUnexpectedEOF + b, err = br.readSmall(3) + switch err { + case io.EOF: + return io.EOF + default: + return err + case nil: + copy(signature[1:], b) + } + + if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { + if debugDecoder { + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) + } + // Break if not skippable frame. 
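The signature test above distinguishes skippable frames by their little-endian magic 0x184D2A5?, i.e. a first byte of 0x50..0x5F followed by 2A 4D 18. A standalone check mirroring the same condition:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	skippableFrameMagic := []byte{0x2a, 0x4d, 0x18}
	sig := []byte{0x5a, 0x2a, 0x4d, 0x18} // example skippable signature
	skippable := bytes.Equal(sig[1:4], skippableFrameMagic) && sig[0]&0xf0 == 0x50
	fmt.Println(skippable) // true
}
```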
+ break + } + // Read size to skip + b, err = br.readSmall(4) + if err != nil { + if debugDecoder { + println("Reading Frame Size", err) + } + return err + } + n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + println("Skipping frame with", n, "bytes.") + err = br.skipN(int64(n)) + if err != nil { + if debugDecoder { + println("Reading discarded frame", err) + } + return err + } + } + if !bytes.Equal(signature[:], frameMagic) { + if debugDecoder { + println("Got magic numbers: ", signature, "want:", frameMagic) + } + return ErrMagicMismatch + } + + // Read Frame_Header_Descriptor + fhd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Frame_Header_Descriptor", err) + } + return err + } + d.SingleSegment = fhd&(1<<5) != 0 + + if fhd&(1<<3) != 0 { + return errors.New("reserved bit set on frame header") + } + + // Read Window_Descriptor + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor + d.WindowSize = 0 + if !d.SingleSegment { + wd, err := br.readByte() + if err != nil { + if debugDecoder { + println("Reading Window_Descriptor", err) + } + return err + } + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + windowLog := 10 + (wd >> 3) + windowBase := uint64(1) << windowLog + windowAdd := (windowBase / 8) * uint64(wd&0x7) + d.WindowSize = windowBase + windowAdd + } + + // Read Dictionary_ID + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id + d.DictionaryID = nil + if size := fhd & 3; size != 0 { + if size == 3 { + size = 4 + } + + b, err := br.readSmall(int(size)) + if err != nil { + println("Reading Dictionary_ID", err) + return err + } + var id uint32 + switch size { + case 1: + id = uint32(b[0]) + case 2: + id = uint32(b[0]) | (uint32(b[1]) << 8) + case 4: + id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + } + if debugDecoder { + println("Dict size", size, "ID:", id) + } + if id > 0 { + // ID 0 means "sorry, no dictionary anyway". + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format + d.DictionaryID = &id + } + } + + // Read Frame_Content_Size + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size + var fcsSize int + v := fhd >> 6 + switch v { + case 0: + if d.SingleSegment { + fcsSize = 1 + } + default: + fcsSize = 1 << v + } + d.FrameContentSize = fcsUnknown + if fcsSize > 0 { + b, err := br.readSmall(fcsSize) + if err != nil { + println("Reading Frame content", err) + return err + } + switch fcsSize { + case 1: + d.FrameContentSize = uint64(b[0]) + case 2: + // When FCS_Field_Size is 2, the offset of 256 is added. + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 + case 4: + d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) + case 8: + d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) + d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) + d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) + } + if debugDecoder { + println("Read FCS:", d.FrameContentSize) + } + } + + // Move this to shared. 
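A worked example of the Window_Descriptor arithmetic above: the exponent sets a power-of-two base starting at 1 KB, and each mantissa step adds one eighth of that base:

```go
package main

import "fmt"

func main() {
	for _, wd := range []byte{0x00, 0x08, 0x0b, 0x88} {
		windowLog := 10 + (wd >> 3)
		windowBase := uint64(1) << windowLog
		windowAdd := (windowBase / 8) * uint64(wd&0x7)
		fmt.Printf("wd=0x%02x -> window %d bytes\n", wd, windowBase+windowAdd)
	}
}
```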
+ d.HasCheckSum = fhd&(1<<2) != 0 + if d.HasCheckSum { + if d.crc == nil { + d.crc = xxhash.New() + } + d.crc.Reset() + } + + if d.WindowSize > d.o.maxWindowSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrWindowSizeExceeded + } + + if d.WindowSize == 0 && d.SingleSegment { + // We may not need window in this case. + d.WindowSize = d.FrameContentSize + if d.WindowSize < MinWindowSize { + d.WindowSize = MinWindowSize + } + if d.WindowSize > d.o.maxDecodedSize { + if debugDecoder { + printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) + } + return ErrDecoderSizeExceeded + } + } + + // The minimum Window_Size is 1 KB. + if d.WindowSize < MinWindowSize { + if debugDecoder { + println("got window size: ", d.WindowSize) + } + return ErrWindowSizeTooSmall + } + d.history.windowSize = int(d.WindowSize) + if !d.o.lowMem || d.history.windowSize < maxBlockSize { + // Alloc 2x window size if not low-mem, or very small window size. + d.history.allocFrameBuffer = d.history.windowSize * 2 + } else { + // Alloc with one additional block + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize + } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + + // history contains input - maybe we do something + d.rawInput = br + return nil +} + +// next will start decoding the next block from stream. +func (d *frameDec) next(block *blockDec) error { + if debugDecoder { + println("decoding new block") + } + err := block.reset(d.rawInput, d.WindowSize) + if err != nil { + println("block error:", err) + // Signal the frame decoder we have a problem. + block.sendErr(err) + return err + } + return nil +} + +// checkCRC will check the checksum if the frame has one. +// Will return ErrCRCMismatch if crc check failed, otherwise nil. +func (d *frameDec) checkCRC() error { + if !d.HasCheckSum { + return nil + } + + // We can overwrite upper tmp now + want, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + + if d.o.ignoreChecksum { + return nil + } + + var tmp [4]byte + got := d.crc.Sum64() + // Flip to match file order. + tmp[0] = byte(got >> 0) + tmp[1] = byte(got >> 8) + tmp[2] = byte(got >> 16) + tmp[3] = byte(got >> 24) + + if !bytes.Equal(tmp[:], want) { + if debugDecoder { + println("CRC Check Failed:", tmp[:], "!=", want) + } + return ErrCRCMismatch + } + if debugDecoder { + println("CRC ok", tmp[:]) + } + return nil +} + +// consumeCRC reads the checksum data if the frame has one. +func (d *frameDec) consumeCRC() error { + if d.HasCheckSum { + _, err := d.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + return err + } + } + + return nil +} + +// runDecoder will run the decoder for the remainder of the frame. +func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { + saved := d.history.b + + // We use the history for output to avoid copying it. + d.history.b = dst + d.history.ignoreBuffer = len(dst) + // Store input length, so we only check new data. 
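+	// Bytes below crcStart were supplied by the caller; only data decoded
+	// after this point is checksummed and counted against the frame size.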
+ crcStart := len(dst) + d.history.decoders.maxSyncLen = 0 + if d.o.limitToCap { + d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) + } + if d.FrameContentSize != fcsUnknown { + if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { + d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) + } + if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) + } + return dst, ErrDecoderSizeExceeded + } + if debugDecoder { + println("maxSyncLen:", d.history.decoders.maxSyncLen) + } + if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { + // Alloc for output + dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) + copy(dst2, dst) + dst = dst2 + } + } + var err error + for { + err = dec.reset(d.rawInput, d.WindowSize) + if err != nil { + break + } + if debugDecoder { + println("next block:", dec) + } + err = dec.decodeBuf(&d.history) + if err != nil { + break + } + if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { + println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) + err = ErrDecoderSizeExceeded + break + } + if d.o.limitToCap && len(d.history.b) > cap(dst) { + println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) + err = ErrDecoderSizeExceeded + break + } + if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } + } + dst = d.history.b + if err == nil { + if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { + if d.o.ignoreChecksum { + err = d.consumeCRC() + } else { + var n int + n, err = d.crc.Write(dst[crcStart:]) + if err == nil { + if n != len(dst)-crcStart { + err = io.ErrShortWrite + } else { + err = d.checkCRC() + } + } + } + } + } + d.history.b = saved + return dst, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go new file mode 100644 index 00000000..4ef7f5a3 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -0,0 +1,137 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "fmt" + "io" + "math" + "math/bits" +) + +type frameHeader struct { + ContentSize uint64 + WindowSize uint32 + SingleSegment bool + Checksum bool + DictID uint32 +} + +const maxHeaderSize = 14 + +func (f frameHeader) appendTo(dst []byte) ([]byte, error) { + dst = append(dst, frameMagic...) 
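+	// Header layout: magic, frame header descriptor byte, optional window
+	// descriptor, optional dictionary ID (1, 2 or 4 bytes), optional
+	// frame content size (0, 1, 2, 4 or 8 bytes).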
+	var fhd uint8
+	if f.Checksum {
+		fhd |= 1 << 2
+	}
+	if f.SingleSegment {
+		fhd |= 1 << 5
+	}
+
+	var dictIDContent []byte
+	if f.DictID > 0 {
+		var tmp [4]byte
+		if f.DictID < 256 {
+			fhd |= 1
+			tmp[0] = uint8(f.DictID)
+			dictIDContent = tmp[:1]
+		} else if f.DictID < 1<<16 {
+			fhd |= 2
+			binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID))
+			dictIDContent = tmp[:2]
+		} else {
+			fhd |= 3
+			binary.LittleEndian.PutUint32(tmp[:4], f.DictID)
+			dictIDContent = tmp[:4]
+		}
+	}
+	var fcs uint8
+	if f.ContentSize >= 256 {
+		fcs++
+	}
+	if f.ContentSize >= 65536+256 {
+		fcs++
+	}
+	if f.ContentSize >= 0xffffffff {
+		fcs++
+	}
+
+	fhd |= fcs << 6
+
+	dst = append(dst, fhd)
+	if !f.SingleSegment {
+		const winLogMin = 10
+		windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3
+		dst = append(dst, uint8(windowLog))
+	}
+	if f.DictID > 0 {
+		dst = append(dst, dictIDContent...)
+	}
+	switch fcs {
+	case 0:
+		if f.SingleSegment {
+			dst = append(dst, uint8(f.ContentSize))
+		}
+		// Unless SingleSegment is set, frame sizes < 256 are not stored.
+	case 1:
+		f.ContentSize -= 256
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
+	case 2:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24))
+	case 3:
+		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24),
+			uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56))
+	default:
+		panic("invalid fcs")
+	}
+	return dst, nil
+}
+
+const skippableFrameHeader = 4 + 4
+
+// calcSkippableFrame will return a total size to be added to written
+// so that it is divisible by wantMultiple.
+// The returned value will be 0 or at least skippableFrameHeader.
+// The function will panic if written < 0 or wantMultiple <= 0.
+func calcSkippableFrame(written, wantMultiple int64) int {
+	if wantMultiple <= 0 {
+		panic("wantMultiple <= 0")
+	}
+	if written < 0 {
+		panic("written < 0")
+	}
+	leftOver := written % wantMultiple
+	if leftOver == 0 {
+		return 0
+	}
+	toAdd := wantMultiple - leftOver
+	for toAdd < skippableFrameHeader {
+		toAdd += wantMultiple
+	}
+	return int(toAdd)
+}
+
+// skippableFrame will add a skippable frame with a total size of total bytes.
+// total should be >= skippableFrameHeader and < math.MaxUint32.
+func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) {
+	if total == 0 {
+		return dst, nil
+	}
+	if total < skippableFrameHeader {
+		return dst, fmt.Errorf("requested skippable frame (%d) < 8", total)
+	}
+	if int64(total) > math.MaxUint32 {
+		return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total)
+	}
+	dst = append(dst, 0x50, 0x2a, 0x4d, 0x18)
+	f := uint32(total - skippableFrameHeader)
+	dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24))
+	start := len(dst)
+	dst = append(dst, make([]byte, f)...)
+	_, err := io.ReadFull(r, dst[start:])
+	return dst, err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
new file mode 100644
index 00000000..2f8860a7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go
@@ -0,0 +1,307 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
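+
+// This file implements reading of FSE (Finite State Entropy) decoding tables,
+// the tANS-based entropy coding stage that zstd uses for sequences.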
+ +package zstd + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +const ( + tablelogAbsoluteMax = 9 +) + +const ( + /*!MEMORY_USAGE : + * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) + * Increasing memory usage improves compression ratio + * Reduced memory usage can improve speed, due to cache effect + * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ + maxMemoryUsage = tablelogAbsoluteMax + 2 + + maxTableLog = maxMemoryUsage - 2 + maxTablesize = 1 << maxTableLog + maxTableMask = (1 << maxTableLog) - 1 + minTablelog = 5 + maxSymbolValue = 255 +) + +// fseDecoder provides temporary storage for compression and decompression. +type fseDecoder struct { + dt [maxTablesize]decSymbol // Decompression table. + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + maxBits uint8 // Maximum number of additional bits + + // used for table creation to avoid allocations. + stateTable [256]uint16 + norm [maxSymbolValue + 1]int16 + preDefined bool +} + +// tableStep returns the next table index. +func tableStep(tableSize uint32) uint32 { + return (tableSize >> 1) + (tableSize >> 3) + 3 +} + +// readNCount will read the symbol distribution so decoding tables can be constructed. +func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { + var ( + charnum uint16 + previous0 bool + ) + if b.remain() < 4 { + return errors.New("input too small") + } + bitStream := b.Uint32NC() + nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog + if nbBits > tablelogAbsoluteMax { + println("Invalid tablelog:", nbBits) + return errors.New("tableLog too large") + } + bitStream >>= 4 + bitCount := uint(4) + + s.actualTableLog = uint8(nbBits) + remaining := int32((1 << nbBits) + 1) + threshold := int32(1 << nbBits) + gotTotal := int32(0) + nbBits++ + + for remaining > 1 && charnum <= maxSymbol { + if previous0 { + //println("prev0") + n0 := charnum + for (bitStream & 0xFFFF) == 0xFFFF { + //println("24 x 0") + n0 += 24 + if r := b.remain(); r > 5 { + b.advance(2) + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + // end of bit stream + bitStream >>= 16 + bitCount += 16 + } + } + //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) + for (bitStream & 3) == 3 { + n0 += 3 + bitStream >>= 2 + bitCount += 2 + } + n0 += uint16(bitStream & 3) + bitCount += 2 + + if n0 > maxSymbolValue { + return errors.New("maxSymbolValue too small") + } + //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) + for charnum < n0 { + s.norm[uint8(charnum)] = 0 + charnum++ + } + + if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { + b.advance(bitCount >> 3) + bitCount &= 7 + // The check above should make sure we can read 32 bits + bitStream = b.Uint32NC() >> bitCount + } else { + bitStream >>= 2 + } + } + + max := (2*threshold - 1) - remaining + var count int32 + + if int32(bitStream)&(threshold-1) < max { + count = int32(bitStream) & (threshold - 1) + if debugAsserts && nbBits < 1 { + panic("nbBits underflow") + } + bitCount += nbBits - 1 + } else { + count = int32(bitStream) & (2*threshold - 1) + if count >= threshold { + count -= max + } + bitCount += nbBits + } + + // extra accuracy + count-- + if count < 0 { + // -1 means +1 + remaining += count + gotTotal -= count + } else { + remaining -= count + gotTotal += count + } + s.norm[charnum&0xff] = int16(count) + 
charnum++
+		previous0 = count == 0
+		for remaining < threshold {
+			nbBits--
+			threshold >>= 1
+		}
+
+		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
+			b.advance(bitCount >> 3)
+			bitCount &= 7
+			// The check above should make sure we can read 32 bits
+			bitStream = b.Uint32NC() >> (bitCount & 31)
+		} else {
+			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
+			b.off = len(b.b) - 4
+			bitStream = b.Uint32() >> (bitCount & 31)
+		}
+	}
+	s.symbolLen = charnum
+	if s.symbolLen <= 1 {
+		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
+	}
+	if s.symbolLen > maxSymbolValue+1 {
+		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
+	}
+	if remaining != 1 {
+		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
+	}
+	if bitCount > 32 {
+		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
+	}
+	if gotTotal != 1<<s.actualTableLog {
+		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
+	}
+	b.advance((bitCount + 7) >> 3)
+	return s.buildDtable()
+}
+
+func (s *fseDecoder) mustReadFrom(r io.Reader) {
+	fatalErr := func(err error) {
+		if err != nil {
+			panic(err)
+		}
+	}
+	// dt [maxTablesize]decSymbol // Decompression table.
+	// symbolLen uint16 // Length of active part of the symbol table.
+	// actualTableLog uint8 // Selected tablelog.
+	// maxBits uint8 // Maximum number of additional bits
+	// // used for table creation to avoid allocations.
+	// stateTable [256]uint16
+	// norm [maxSymbolValue + 1]int16
+	// preDefined bool
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
+	fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
+}
+
+// decSymbol contains information about a state entry,
+// including the state offset base, the output symbol and
+// the number of bits to read for the low part of the destination state.
+// Using a composite uint64 is faster than a struct with separate members.
+type decSymbol uint64
+
+func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
+	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
+}
+
+func (d decSymbol) nbBits() uint8 {
+	return uint8(d)
+}
+
+func (d decSymbol) addBits() uint8 {
+	return uint8(d >> 8)
+}
+
+func (d decSymbol) newState() uint16 {
+	return uint16(d >> 16)
+}
+
+func (d decSymbol) baselineInt() int {
+	return int(d >> 32)
+}
+
+func (d *decSymbol) setNBits(nBits uint8) {
+	const mask = 0xffffffffffffff00
+	*d = (*d & mask) | decSymbol(nBits)
+}
+
+func (d *decSymbol) setAddBits(addBits uint8) {
+	const mask = 0xffffffffffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8)
+}
+
+func (d *decSymbol) setNewState(state uint16) {
+	const mask = 0xffffffff0000ffff
+	*d = (*d & mask) | decSymbol(state)<<16
+}
+
+func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
+	const mask = 0xffff00ff
+	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
+}
+
+// decSymbolValue returns the transformed decSymbol for the given symbol.
+func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
+	if int(symb) >= len(t) {
+		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
+	}
+	lu := t[symb]
+	return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
+}
+
+// setRLE will set the decoder to RLE mode.
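+// An RLE block repeats a single symbol, so the table collapses to one entry
+// that is read with zero additional state bits (table log 0).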
+func (s *fseDecoder) setRLE(symbol decSymbol) { + s.actualTableLog = 0 + s.maxBits = symbol.addBits() + s.dt[0] = symbol +} + +// transform will transform the decoder table into a table usable for +// decoding without having to apply the transformation while decoding. +// The state will contain the base value and the number of bits to read. +func (s *fseDecoder) transform(t []baseOffset) error { + tableSize := uint16(1 << s.actualTableLog) + s.maxBits = 0 + for i, v := range s.dt[:tableSize] { + add := v.addBits() + if int(add) >= len(t) { + return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) + } + lu := t[add] + if lu.addBits > s.maxBits { + s.maxBits = lu.addBits + } + v.setExt(lu.addBits, lu.baseLine) + s.dt[i] = v + } + return nil +} + +type fseState struct { + dt []decSymbol + state decSymbol +} + +// Initialize and decodeAsync first state and symbol. +func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { + s.dt = dt + br.fill() + s.state = dt[br.getBits(tableLog)] +} + +// final returns the current state symbol without decoding the next. +func (s decSymbol) final() (int, uint8) { + return s.baselineInt(), s.addBits() +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go new file mode 100644 index 00000000..d04a829b --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go @@ -0,0 +1,65 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" +) + +type buildDtableAsmContext struct { + // inputs + stateTable *uint16 + norm *int16 + dt *uint64 + + // outputs --- set by the procedure in the case of error; + // for interpretation please see the error handling part below + errParam1 uint64 + errParam2 uint64 +} + +// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. +// Function returns non-zero exit code on error. +// +//go:noescape +func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int + +// please keep in sync with _generate/gen_fse.go +const ( + errorCorruptedNormalizedCounter = 1 + errorNewStateTooBig = 2 + errorNewStateNoBits = 3 +) + +// buildDtable will build the decoding table. +func (s *fseDecoder) buildDtable() error { + ctx := buildDtableAsmContext{ + stateTable: &s.stateTable[0], + norm: &s.norm[0], + dt: (*uint64)(&s.dt[0]), + } + code := buildDtable_asm(s, &ctx) + + if code != 0 { + switch code { + case errorCorruptedNormalizedCounter: + position := ctx.errParam1 + return fmt.Errorf("corrupted input (position=%d, expected 0)", position) + + case errorNewStateTooBig: + newState := decSymbol(ctx.errParam1) + size := ctx.errParam2 + return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) + + case errorNewStateNoBits: + newState := decSymbol(ctx.errParam1) + oldState := decSymbol(ctx.errParam2) + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) + + default: + return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s new file mode 100644 index 00000000..bcde3986 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s @@ -0,0 +1,126 @@ +// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. 
DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int +TEXT ·buildDtable_asm(SB), $0-24 + MOVQ ctx+8(FP), CX + MOVQ s+0(FP), DI + + // Load values + MOVBQZX 4098(DI), DX + XORQ AX, AX + BTSQ DX, AX + MOVQ (CX), BX + MOVQ 16(CX), SI + LEAQ -1(AX), R8 + MOVQ 8(CX), CX + MOVWQZX 4096(DI), DI + + // End load values + // Init, lay down lowprob symbols + XORQ R9, R9 + JMP init_main_loop_condition + +init_main_loop: + MOVWQSX (CX)(R9*2), R10 + CMPW R10, $-1 + JNE do_not_update_high_threshold + MOVB R9, 1(SI)(R8*8) + DECQ R8 + MOVQ $0x0000000000000001, R10 + +do_not_update_high_threshold: + MOVW R10, (BX)(R9*2) + INCQ R9 + +init_main_loop_condition: + CMPQ R9, DI + JL init_main_loop + + // Spread symbols + // Calculate table step + MOVQ AX, R9 + SHRQ $0x01, R9 + MOVQ AX, R10 + SHRQ $0x03, R10 + LEAQ 3(R9)(R10*1), R9 + + // Fill add bits values + LEAQ -1(AX), R10 + XORQ R11, R11 + XORQ R12, R12 + JMP spread_main_loop_condition + +spread_main_loop: + XORQ R13, R13 + MOVWQSX (CX)(R12*2), R14 + JMP spread_inner_loop_condition + +spread_inner_loop: + MOVB R12, 1(SI)(R11*8) + +adjust_position: + ADDQ R9, R11 + ANDQ R10, R11 + CMPQ R11, R8 + JG adjust_position + INCQ R13 + +spread_inner_loop_condition: + CMPQ R13, R14 + JL spread_inner_loop + INCQ R12 + +spread_main_loop_condition: + CMPQ R12, DI + JL spread_main_loop + TESTQ R11, R11 + JZ spread_check_ok + MOVQ ctx+8(FP), AX + MOVQ R11, 24(AX) + MOVQ $+1, ret+16(FP) + RET + +spread_check_ok: + // Build Decoding table + XORQ DI, DI + +build_table_main_table: + MOVBQZX 1(SI)(DI*8), CX + MOVWQZX (BX)(CX*2), R8 + LEAQ 1(R8), R9 + MOVW R9, (BX)(CX*2) + MOVQ R8, R9 + BSRQ R9, R9 + MOVQ DX, CX + SUBQ R9, CX + SHLQ CL, R8 + SUBQ AX, R8 + MOVB CL, (SI)(DI*8) + MOVW R8, 2(SI)(DI*8) + CMPQ R8, AX + JLE build_table_check1_ok + MOVQ ctx+8(FP), CX + MOVQ R8, 24(CX) + MOVQ AX, 32(CX) + MOVQ $+2, ret+16(FP) + RET + +build_table_check1_ok: + TESTB CL, CL + JNZ build_table_check2_ok + CMPW R8, DI + JNE build_table_check2_ok + MOVQ ctx+8(FP), AX + MOVQ R8, 24(AX) + MOVQ DI, 32(AX) + MOVQ $+3, ret+16(FP) + RET + +build_table_check2_ok: + INCQ DI + CMPQ DI, AX + JL build_table_main_table + MOVQ $+0, ret+16(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go new file mode 100644 index 00000000..332e51fe --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -0,0 +1,72 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "errors" + "fmt" +) + +// buildDtable will build the decoding table. 
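+// This is the pure Go fallback selected by the build tags above; on amd64
+// with assembly enabled, buildDtable_asm from fse_decoder_amd64.s is used.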
+func (s *fseDecoder) buildDtable() error { + tableSize := uint32(1 << s.actualTableLog) + highThreshold := tableSize - 1 + symbolNext := s.stateTable[:256] + + // Init, lay down lowprob symbols + { + for i, v := range s.norm[:s.symbolLen] { + if v == -1 { + s.dt[highThreshold].setAddBits(uint8(i)) + highThreshold-- + symbolNext[i] = 1 + } else { + symbolNext[i] = uint16(v) + } + } + } + + // Spread symbols + { + tableMask := tableSize - 1 + step := tableStep(tableSize) + position := uint32(0) + for ss, v := range s.norm[:s.symbolLen] { + for i := 0; i < int(v); i++ { + s.dt[position].setAddBits(uint8(ss)) + position = (position + step) & tableMask + for position > highThreshold { + // lowprob area + position = (position + step) & tableMask + } + } + } + if position != 0 { + // position must reach all cells once, otherwise normalizedCounter is incorrect + return errors.New("corrupted input (position != 0)") + } + } + + // Build Decoding table + { + tableSize := uint16(1 << s.actualTableLog) + for u, v := range s.dt[:tableSize] { + symbol := v.addBits() + nextState := symbolNext[symbol] + symbolNext[symbol] = nextState + 1 + nBits := s.actualTableLog - byte(highBits(uint32(nextState))) + s.dt[u&maxTableMask].setNBits(nBits) + newState := (nextState << nBits) - tableSize + if newState > tableSize { + return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) + } + if newState == uint16(u) && nBits == 0 { + // Seems weird that this is possible with nbits > 0. + return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) + } + s.dt[u&maxTableMask].setNewState(newState) + } + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go new file mode 100644 index 00000000..ab26326a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -0,0 +1,701 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "math" +) + +const ( + // For encoding we only support up to + maxEncTableLog = 8 + maxEncTablesize = 1 << maxTableLog + maxEncTableMask = (1 << maxTableLog) - 1 + minEncTablelog = 5 + maxEncSymbolValue = maxMatchLengthSymbol +) + +// Scratch provides temporary storage for compression and decompression. +type fseEncoder struct { + symbolLen uint16 // Length of active part of the symbol table. + actualTableLog uint8 // Selected tablelog. + ct cTable // Compression tables. + maxCount int // count of the most probable symbol + zeroBits bool // no bits has prob > 50%. + clearCount bool // clear count + useRLE bool // This encoder is for RLE + preDefined bool // This encoder is predefined. + reUsed bool // Set to know when the encoder has been reused. + rleVal uint8 // RLE Symbol + maxBits uint8 // Maximum output bits after transform. + + // TODO: Technically zstd should be fine with 64 bytes. + count [256]uint32 + norm [256]int16 +} + +// cTable contains tables used for compression. +type cTable struct { + tableSymbol []byte + stateTable []uint16 + symbolTT []symbolTransform +} + +// symbolTransform contains the state transform for a symbol. +type symbolTransform struct { + deltaNbBits uint32 + deltaFindState int16 + outBits uint8 +} + +// String prints values as a human readable string. 
+func (s symbolTransform) String() string {
+	return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits)
+}
+
+// Histogram allows populating the histogram and skipping that step in compression.
+// It also allows inspecting the histogram when compression is done.
+// To indicate that you have populated the histogram, call HistogramFinished
+// with the value of the highest populated symbol, as well as the number of entries
+// in the most populated entry. These are accepted at face value.
+func (s *fseEncoder) Histogram() *[256]uint32 {
+	return &s.count
+}
+
+// HistogramFinished can be called to indicate that the histogram has been populated.
+// maxSymbol is the index of the highest set symbol of the next data segment.
+// maxCount is the number of entries in the most populated entry.
+// These are accepted at face value.
+func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) {
+	s.maxCount = maxCount
+	s.symbolLen = uint16(maxSymbol) + 1
+	s.clearCount = maxCount != 0
+}
+
+// allocCtable will allocate tables needed for compression.
+// If existing tables are big enough, they are simply re-used.
+func (s *fseEncoder) allocCtable() {
+	tableSize := 1 << s.actualTableLog
+	// get tableSymbol that is big enough.
+	if cap(s.ct.tableSymbol) < tableSize {
+		s.ct.tableSymbol = make([]byte, tableSize)
+	}
+	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
+
+	ctSize := tableSize
+	if cap(s.ct.stateTable) < ctSize {
+		s.ct.stateTable = make([]uint16, ctSize)
+	}
+	s.ct.stateTable = s.ct.stateTable[:ctSize]
+
+	if cap(s.ct.symbolTT) < 256 {
+		s.ct.symbolTT = make([]symbolTransform, 256)
+	}
+	s.ct.symbolTT = s.ct.symbolTT[:256]
+}
+
+// buildCTable will populate the compression table so it is ready to be used.
+func (s *fseEncoder) buildCTable() error {
+	tableSize := uint32(1 << s.actualTableLog)
+	highThreshold := tableSize - 1
+	var cumul [256]int16
+
+	s.allocCtable()
+	tableSymbol := s.ct.tableSymbol[:tableSize]
+	// symbol start positions
+	{
+		cumul[0] = 0
+		for ui, v := range s.norm[:s.symbolLen-1] {
+			u := byte(ui) // one less than reference
+			if v == -1 {
+				// Low proba symbol
+				cumul[u+1] = cumul[u] + 1
+				tableSymbol[highThreshold] = u
+				highThreshold--
+			} else {
+				cumul[u+1] = cumul[u] + v
+			}
+		}
+		// Encode last symbol separately to avoid overflowing u
+		u := int(s.symbolLen - 1)
+		v := s.norm[s.symbolLen-1]
+		if v == -1 {
+			// Low proba symbol
+			cumul[u+1] = cumul[u] + 1
+			tableSymbol[highThreshold] = byte(u)
+			highThreshold--
+		} else {
+			cumul[u+1] = cumul[u] + v
+		}
+		if uint32(cumul[s.symbolLen]) != tableSize {
+			return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
+		}
+		cumul[s.symbolLen] = int16(tableSize) + 1
+	}
+	// Spread symbols
+	s.zeroBits = false
+	{
+		step := tableStep(tableSize)
+		tableMask := tableSize - 1
+		var position uint32
+		// if any symbol > largeLimit, we may have 0 bits output.
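+		// A symbol with a normalized count above half the table can be sent
+		// with zero bits from some states.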
+ largeLimit := int16(1 << (s.actualTableLog - 1)) + for ui, v := range s.norm[:s.symbolLen] { + symbol := byte(ui) + if v > largeLimit { + s.zeroBits = true + } + for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + tableSymbol[position] = symbol + position = (position + step) & tableMask + for position > highThreshold { + position = (position + step) & tableMask + } /* Low proba area */ + } + } + + // Check if we have gone through all positions + if position != 0 { + return errors.New("position!=0") + } + } + + // Build table + table := s.ct.stateTable + { + tsi := int(tableSize) + for u, v := range tableSymbol { + // TableU16 : sorted by symbol order; gives next state value + table[cumul[v]] = uint16(tsi + u) + cumul[v]++ + } + } + + // Build Symbol Transformation Table + { + total := int16(0) + symbolTT := s.ct.symbolTT[:s.symbolLen] + tableLog := s.actualTableLog + tl := (uint32(tableLog) << 16) - (1 << tableLog) + for i, v := range s.norm[:s.symbolLen] { + switch v { + case 0: + case -1, 1: + symbolTT[i].deltaNbBits = tl + symbolTT[i].deltaFindState = total - 1 + total++ + default: + maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) + minStatePlus := uint32(v) << maxBitsOut + symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus + symbolTT[i].deltaFindState = total - v + total += v + } + } + if total != int16(tableSize) { + return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) + } + } + return nil +} + +var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} + +func (s *fseEncoder) setRLE(val byte) { + s.allocCtable() + s.actualTableLog = 0 + s.ct.stateTable = s.ct.stateTable[:1] + s.ct.symbolTT[val] = symbolTransform{ + deltaFindState: 0, + deltaNbBits: 0, + } + if debugEncoder { + println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) + } + s.rleVal = val + s.useRLE = true +} + +// setBits will set output bits for the transform. +// if nil is provided, the number of bits is equal to the index. +func (s *fseEncoder) setBits(transform []byte) { + if s.reUsed || s.preDefined { + return + } + if s.useRLE { + if transform == nil { + s.ct.symbolTT[s.rleVal].outBits = s.rleVal + s.maxBits = s.rleVal + return + } + s.maxBits = transform[s.rleVal] + s.ct.symbolTT[s.rleVal].outBits = s.maxBits + return + } + if transform == nil { + for i := range s.ct.symbolTT[:s.symbolLen] { + s.ct.symbolTT[i].outBits = uint8(i) + } + s.maxBits = uint8(s.symbolLen - 1) + return + } + s.maxBits = 0 + for i, v := range transform[:s.symbolLen] { + s.ct.symbolTT[i].outBits = v + if v > s.maxBits { + // We could assume bits always going up, but we play safe. + s.maxBits = v + } + } +} + +// normalizeCount will normalize the count of the symbols so +// the total is equal to the table size. +// If successful, compression tables will also be made ready. 
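+// As a rough example of the scaling: with tableLog 6 the counts are scaled to
+// sum to 64 cells, so a symbol seen 50 times out of 100 gets about 32 cells.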
+func (s *fseEncoder) normalizeCount(length int) error { + if s.reUsed { + return nil + } + s.optimalTableLog(length) + var ( + tableLog = s.actualTableLog + scale = 62 - uint64(tableLog) + step = (1 << 62) / uint64(length) + vStep = uint64(1) << (scale - 20) + stillToDistribute = int16(1 << tableLog) + largest int + largestP int16 + lowThreshold = (uint32)(length >> tableLog) + ) + if s.maxCount == length { + s.useRLE = true + return nil + } + s.useRLE = false + for i, cnt := range s.count[:s.symbolLen] { + // already handled + // if (count[s] == s.length) return 0; /* rle special case */ + + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + stillToDistribute-- + } else { + proba := (int16)((uint64(cnt) * step) >> scale) + if proba < 8 { + restToBeat := vStep * uint64(rtbTable[proba]) + v := uint64(cnt)*step - (uint64(proba) << scale) + if v > restToBeat { + proba++ + } + } + if proba > largestP { + largestP = proba + largest = i + } + s.norm[i] = proba + stillToDistribute -= proba + } + } + + if -stillToDistribute >= (s.norm[largest] >> 1) { + // corner case, need another normalization method + err := s.normalizeCount2(length) + if err != nil { + return err + } + if debugAsserts { + err = s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() + } + s.norm[largest] += stillToDistribute + if debugAsserts { + err := s.validateNorm() + if err != nil { + return err + } + } + return s.buildCTable() +} + +// Secondary normalization method. +// To be used when primary method fails. +func (s *fseEncoder) normalizeCount2(length int) error { + const notYetAssigned = -2 + var ( + distributed uint32 + total = uint32(length) + tableLog = s.actualTableLog + lowThreshold = total >> tableLog + lowOne = (total * 3) >> (tableLog + 1) + ) + for i, cnt := range s.count[:s.symbolLen] { + if cnt == 0 { + s.norm[i] = 0 + continue + } + if cnt <= lowThreshold { + s.norm[i] = -1 + distributed++ + total -= cnt + continue + } + if cnt <= lowOne { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + s.norm[i] = notYetAssigned + } + toDistribute := (1 << tableLog) - distributed + + if (total / toDistribute) > lowOne { + // risk of rounding to zero + lowOne = (total * 3) / (toDistribute * 2) + for i, cnt := range s.count[:s.symbolLen] { + if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { + s.norm[i] = 1 + distributed++ + total -= cnt + continue + } + } + toDistribute = (1 << tableLog) - distributed + } + if distributed == uint32(s.symbolLen)+1 { + // all values are pretty poor; + // probably incompressible data (should have already been detected); + // find max, then give all remaining points to max + var maxV int + var maxC uint32 + for i, cnt := range s.count[:s.symbolLen] { + if cnt > maxC { + maxV = i + maxC = cnt + } + } + s.norm[maxV] += int16(toDistribute) + return nil + } + + if total == 0 { + // all of the symbols were low enough for the lowOne or lowThreshold + for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { + if s.norm[i] > 0 { + toDistribute-- + s.norm[i]++ + } + } + return nil + } + + var ( + vStepLog = 62 - uint64(tableLog) + mid = uint64((1 << (vStepLog - 1)) - 1) + rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining + tmpTotal = mid + ) + for i, cnt := range s.count[:s.symbolLen] { + if s.norm[i] == notYetAssigned { + var ( + end = tmpTotal + uint64(cnt)*rStep + sStart = uint32(tmpTotal >> vStepLog) + sEnd = uint32(end >> vStepLog) + weight = sEnd - 
sStart
+			)
+			if weight < 1 {
+				return errors.New("weight < 1")
+			}
+			s.norm[i] = int16(weight)
+			tmpTotal = end
+		}
+	}
+	return nil
+}

+// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog
+func (s *fseEncoder) optimalTableLog(length int) {
+	tableLog := uint8(maxEncTableLog)
+	minBitsSrc := highBit(uint32(length)) + 1
+	minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2
+	minBits := uint8(minBitsSymbols)
+	if minBitsSrc < minBitsSymbols {
+		minBits = uint8(minBitsSrc)
+	}
+
+	maxBitsSrc := uint8(highBit(uint32(length-1))) - 2
+	if maxBitsSrc < tableLog {
+		// Accuracy can be reduced
+		tableLog = maxBitsSrc
+	}
+	if minBits > tableLog {
+		tableLog = minBits
+	}
+	// Need a minimum to safely represent all symbol values
+	if tableLog < minEncTablelog {
+		tableLog = minEncTablelog
+	}
+	if tableLog > maxEncTableLog {
+		tableLog = maxEncTableLog
+	}
+	s.actualTableLog = tableLog
+}
+
+// validateNorm validates the normalized histogram table.
+func (s *fseEncoder) validateNorm() (err error) {
+	var total int
+	for _, v := range s.norm[:s.symbolLen] {
+		if v >= 0 {
+			total += int(v)
+		} else {
+			total -= int(v)
+		}
+	}
+	defer func() {
+		if err == nil {
+			return
+		}
+		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
+		for i, v := range s.norm[:s.symbolLen] {
+			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
+		}
+	}()
+	if total != (1 << s.actualTableLog) {
+		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
+	}
+	return nil
+}
+
+// writeCount will write the normalized histogram count to header.
+// This is read back by readNCount.
+func (s *fseEncoder) writeCount(out []byte) ([]byte, error) {
+	if s.useRLE {
+		return append(out, s.rleVal), nil
+	}
+	if s.preDefined {
+		// Never write predefined.
+		return out, nil
+	}
+
+	var (
+		tableLog  = s.actualTableLog
+		tableSize = 1 << tableLog
+		previous0 bool
+		charnum   uint16
+
+		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2
+
+		// Write Table Size
+		bitStream = uint32(tableLog - minEncTablelog)
+		bitCount  = uint(4)
+		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
+		threshold = int16(tableSize)
+		nbBits    = uint(tableLog + 1)
+		outP      = len(out)
+	)
+	if cap(out) < outP+maxHeaderSize {
+		out = append(out, make([]byte, maxHeaderSize*3)...)
+		out = out[:len(out)-maxHeaderSize*3]
+	}
+	out = out[:outP+maxHeaderSize]
+
+	// stops at 1
+	for remaining > 1 {
+		if previous0 {
+			start := charnum
+			for s.norm[charnum] == 0 {
+				charnum++
+			}
+			for charnum >= start+24 {
+				start += 24
+				bitStream += uint32(0xFFFF) << bitCount
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+			}
+			for charnum >= start+3 {
+				start += 3
+				bitStream += 3 << bitCount
+				bitCount += 2
+			}
+			bitStream += uint32(charnum-start) << bitCount
+			bitCount += 2
+			if bitCount > 16 {
+				out[outP] = byte(bitStream)
+				out[outP+1] = byte(bitStream >> 8)
+				outP += 2
+				bitStream >>= 16
+				bitCount -= 16
+			}
+		}
+
+		count := s.norm[charnum]
+		charnum++
+		max := (2*threshold - 1) - remaining
+		if count < 0 {
+			remaining += count
+		} else {
+			remaining -= count
+		}
+		count++ // +1 for extra accuracy
+		if count >= threshold {
+			count += max // [0..max[ [max..threshold[ (...)
[threshold+max 2*threshold[ + } + bitStream += uint32(count) << bitCount + bitCount += nbBits + if count < max { + bitCount-- + } + + previous0 = count == 1 + if remaining < 1 { + return nil, errors.New("internal error: remaining < 1") + } + for remaining < threshold { + nbBits-- + threshold >>= 1 + } + + if bitCount > 16 { + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += 2 + bitStream >>= 16 + bitCount -= 16 + } + } + + if outP+2 > len(out) { + return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) + } + out[outP] = byte(bitStream) + out[outP+1] = byte(bitStream >> 8) + outP += int((bitCount + 7) / 8) + + if charnum > s.symbolLen { + return nil, errors.New("internal error: charnum > s.symbolLen") + } + return out[:outP], nil +} + +// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) +// note 1 : assume symbolValue is valid (<= maxSymbolValue) +// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * +func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { + minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 + threshold := (minNbBits + 1) << 16 + if debugAsserts { + if !(s.actualTableLog < 16) { + panic("!s.actualTableLog < 16") + } + // ensure enough room for renormalization double shift + if !(uint8(accuracyLog) < 31-s.actualTableLog) { + panic("!uint8(accuracyLog) < 31-s.actualTableLog") + } + } + tableSize := uint32(1) << s.actualTableLog + deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) + // linear interpolation (very approximate) + normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog + bitMultiplier := uint32(1) << accuracyLog + if debugAsserts { + if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { + panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") + } + if normalizedDeltaFromThreshold > bitMultiplier { + panic("normalizedDeltaFromThreshold > bitMultiplier") + } + } + return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold +} + +// Returns the cost in bits of encoding the distribution in count using ctable. +// Histogram should only be up to the last non-zero symbol. +// Returns an -1 if ctable cannot represent all the symbols in count. +func (s *fseEncoder) approxSize(hist []uint32) uint32 { + if int(s.symbolLen) < len(hist) { + // More symbols than we have. + return math.MaxUint32 + } + if s.useRLE { + // We will never reuse RLE encoders. + return math.MaxUint32 + } + const kAccuracyLog = 8 + badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog + var cost uint32 + for i, v := range hist { + if v == 0 { + continue + } + if s.norm[i] == 0 { + return math.MaxUint32 + } + bitCost := s.bitCost(uint8(i), kAccuracyLog) + if bitCost > badCost { + return math.MaxUint32 + } + cost += v * bitCost + } + return cost >> kAccuracyLog +} + +// maxHeaderSize returns the maximum header size in bits. +// This is not exact size, but we want a penalty for new tables anyway. +func (s *fseEncoder) maxHeaderSize() uint32 { + if s.preDefined { + return 0 + } + if s.useRLE { + return 8 + } + return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 +} + +// cState contains the compression state of a stream. 
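+// FSE encodes symbols in reverse order, so the decoder can read the stream
+// forward, starting from the final state written by flush.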
+type cState struct { + bw *bitWriter + stateTable []uint16 + state uint16 +} + +// init will initialize the compression state to the first symbol of the stream. +func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { + c.bw = bw + c.stateTable = ct.stateTable + if len(c.stateTable) == 1 { + // RLE + c.stateTable[0] = uint16(0) + c.state = 0 + return + } + nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 + im := int32((nbBitsOut << 16) - first.deltaNbBits) + lu := (im >> nbBitsOut) + int32(first.deltaFindState) + c.state = c.stateTable[lu] +} + +// flush will write the tablelog to the output and flush the remaining full bytes. +func (c *cState) flush(tableLog uint8) { + c.bw.flush32() + c.bw.addBits16NC(c.state, tableLog) +} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go new file mode 100644 index 00000000..474cb77d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go @@ -0,0 +1,158 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "fmt" + "math" + "sync" +) + +var ( + // fsePredef are the predefined fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredef [3]fseDecoder + + // fsePredefEnc are the predefined encoder based on fse tables as defined here: + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + // These values are already transformed. + fsePredefEnc [3]fseEncoder + + // symbolTableX contain the transformations needed for each type as defined in + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + symbolTableX [3][]baseOffset + + // maxTableSymbol is the biggest supported symbol for each table type + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets + maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} + + // bitTables is the bits table for each table. + bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} +) + +type tableIndex uint8 + +const ( + // indexes for fsePredef and symbolTableX + tableLiteralLengths tableIndex = 0 + tableOffsets tableIndex = 1 + tableMatchLengths tableIndex = 2 + + maxLiteralLengthSymbol = 35 + maxOffsetLengthSymbol = 30 + maxMatchLengthSymbol = 52 +) + +// baseOffset is used for calculating transformations. +type baseOffset struct { + baseLine uint32 + addBits uint8 +} + +// fillBase will precalculate base offsets with the given bit distributions. 
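+// For example, fillBase(dst, 16, 1, 1, 2) produces baselines 16, 18 and 20
+// with 1, 1 and 2 extra bits: each entry advances base by 1<<bits[i].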
+func fillBase(dst []baseOffset, base uint32, bits ...uint8) { + if len(bits) != len(dst) { + panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) + } + for i, bit := range bits { + if base > math.MaxInt32 { + panic("invalid decoding table, base overflows int32") + } + + dst[i] = baseOffset{ + baseLine: base, + addBits: bit, + } + base += 1 << bit + } +} + +var predef sync.Once + +func initPredefined() { + predef.Do(func() { + // Literals length codes + tmp := make([]baseOffset, 36) + for i := range tmp[:16] { + tmp[i] = baseOffset{ + baseLine: uint32(i), + addBits: 0, + } + } + fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableLiteralLengths] = tmp + + // Match length codes + tmp = make([]baseOffset, 53) + for i := range tmp[:32] { + tmp[i] = baseOffset{ + // The transformation adds the 3 length. + baseLine: uint32(i) + 3, + addBits: 0, + } + } + fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) + symbolTableX[tableMatchLengths] = tmp + + // Offset codes + tmp = make([]baseOffset, maxOffsetBits+1) + tmp[1] = baseOffset{ + baseLine: 1, + addBits: 1, + } + fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) + symbolTableX[tableOffsets] = tmp + + // Fill predefined tables and transform them. + // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions + for i := range fsePredef[:] { + f := &fsePredef[i] + switch tableIndex(i) { + case tableLiteralLengths: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 + f.actualTableLog = 6 + copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, + -1, -1, -1, -1}) + f.symbolLen = 36 + case tableOffsets: + // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 + f.actualTableLog = 5 + copy(f.norm[:], []int16{ + 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) + f.symbolLen = 29 + case tableMatchLengths: + //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 + f.actualTableLog = 6 + copy(f.norm[:], []int16{ + 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, + -1, -1, -1, -1, -1}) + f.symbolLen = 53 + } + if err := f.buildDtable(); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + if err := f.transform(symbolTableX[i]); err != nil { + panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) + } + f.preDefined = true + + // Create encoder as well + enc := &fsePredefEnc[i] + copy(enc.norm[:], f.norm[:]) + enc.symbolLen = f.symbolLen + enc.actualTableLog = f.actualTableLog + if err := enc.buildCTable(); err != nil { + panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) + } + enc.setBits(bitTables[i]) + enc.preDefined = true + } + }) +} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go new file mode 100644 index 00000000..5d73c21e --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/hash.go @@ -0,0 +1,35 @@ +// Copyright 2019+ Klaus Post. All rights reserved. 
+// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +const ( + prime3bytes = 506832829 + prime4bytes = 2654435761 + prime5bytes = 889523592379 + prime6bytes = 227718039650203 + prime7bytes = 58295818150454627 + prime8bytes = 0xcf1bbcdcb7a56463 +) + +// hashLen returns a hash of the lowest mls bytes of with length output bits. +// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. +// length should always be < 32. +// Preferably length and mls should be a constant for inlining. +func hashLen(u uint64, length, mls uint8) uint32 { + switch mls { + case 3: + return (uint32(u<<8) * prime3bytes) >> (32 - length) + case 5: + return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) + case 6: + return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) + case 7: + return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) + case 8: + return uint32((u * prime8bytes) >> (64 - length)) + default: + return (uint32(u) * prime4bytes) >> (32 - length) + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go new file mode 100644 index 00000000..09164856 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -0,0 +1,116 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "github.com/klauspost/compress/huff0" +) + +// history contains the information transferred between blocks. +type history struct { + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression + decoders sequenceDecs + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict +} + +// reset will reset the history to initial state of a frame. +// The history must already have been initialized to the desired size. +func (h *history) reset() { + h.b = h.b[:0] + h.ignoreBuffer = 0 + h.error = false + h.recentOffsets = [3]int{1, 4, 8} + h.decoders.freeDecoders() + h.decoders = sequenceDecs{br: h.decoders.br} + h.freeHuffDecoder() + h.huffTree = nil + h.dict = nil + //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) +} + +func (h *history) freeHuffDecoder() { + if h.huffTree != nil { + if h.dict == nil || h.dict.litEnc != h.huffTree { + huffDecoderPool.Put(h.huffTree) + h.huffTree = nil + } + } +} + +func (h *history) setDict(dict *dict) { + if dict == nil { + return + } + h.dict = dict + h.decoders.litLengths = dict.llDec + h.decoders.offsets = dict.ofDec + h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content + h.recentOffsets = dict.offsets + h.huffTree = dict.litEnc +} + +// append bytes to history. +// This function will make sure there is space for it, +// if the buffer has been allocated with enough extra space. +func (h *history) append(b []byte) { + if len(b) >= h.windowSize { + // Discard all history by simply overwriting + h.b = h.b[:h.windowSize] + copy(h.b, b[len(b)-h.windowSize:]) + return + } + + // If there is space, append it. + if len(b) < cap(h.b)-len(h.b) { + h.b = append(h.b, b...) + return + } + + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. 
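+	// E.g. windowSize 8 with 6 bytes of history and 4 incoming bytes keeps the
+	// last 4 history bytes (discard = 2) and appends the 4 new ones.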
+ discard := len(b) + len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] + copy(h.b[h.windowSize-len(b):], b) +} + +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + +// append bytes to history without ever discarding anything. +func (h *history) appendKeep(b []byte) { + h.b = append(h.b, b...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt new file mode 100644 index 00000000..24b53065 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md new file mode 100644 index 00000000..69aa3bb5 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -0,0 +1,58 @@ +# xxhash + +VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. + + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) +[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +This package provides a straightforward API: + +``` +func Sum64(b []byte) uint64 +func Sum64String(s string) uint64 +type Digest struct{ ... } + func New() *Digest +``` + +The `Digest` type implements hash.Hash64. Its key methods are: + +``` +func (*Digest) Write([]byte) (int, error) +func (*Digest) WriteString(string) (int, error) +func (*Digest) Sum64() uint64 +``` + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. 
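+
+A minimal usage sketch, using only the API listed above:
+
+```
+fmt.Println(xxhash.Sum64([]byte("hello")))
+
+d := xxhash.New()
+d.WriteString("hello")
+fmt.Println(d.Sum64()) // same value as the one-shot call above
+```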
+ +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64. + +| input size | purego | asm | +| --- | --- | --- | +| 5 B | 979.66 MB/s | 1291.17 MB/s | +| 100 B | 7475.26 MB/s | 7973.40 MB/s | +| 4 KB | 17573.46 MB/s | 17602.65 MB/s | +| 10 MB | 17131.46 MB/s | 17142.16 MB/s | + +These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using +the following commands under Go 1.11.2: + +``` +$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' +$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) +- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go new file mode 100644 index 00000000..2c112a0a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -0,0 +1,237 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. + +package xxhash + +import ( + "encoding/binary" + "errors" + "math/bits" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +// Digest implements hash.Hash64. +type Digest struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total uint64 + mem [32]byte + n int // how much of mem is used +} + +// New creates a new Digest that computes the 64-bit xxHash algorithm. +func New() *Digest { + var d Digest + d.Reset() + return &d +} + +// Reset clears the Digest's state so that it can be reused. +func (d *Digest) Reset() { + d.v1 = prime1v + prime2 + d.v2 = prime2 + d.v3 = 0 + d.v4 = -prime1v + d.total = 0 + d.n = 0 +} + +// Size always returns 8 bytes. +func (d *Digest) Size() int { return 8 } + +// BlockSize always returns 32 bytes. +func (d *Digest) BlockSize() int { return 32 } + +// Write adds more data to d. It always returns len(b), nil. +func (d *Digest) Write(b []byte) (n int, err error) { + n = len(b) + d.total += uint64(n) + + if d.n+n < 32 { + // This new data doesn't even fill the current block. + copy(d.mem[d.n:], b) + d.n += n + return + } + + if d.n > 0 { + // Finish off the partial block. + copy(d.mem[d.n:], b) + d.v1 = round(d.v1, u64(d.mem[0:8])) + d.v2 = round(d.v2, u64(d.mem[8:16])) + d.v3 = round(d.v3, u64(d.mem[16:24])) + d.v4 = round(d.v4, u64(d.mem[24:32])) + b = b[32-d.n:] + d.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + nw := writeBlocks(d, b) + b = b[nw:] + } + + // Store any remaining partial block. 
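+	// Fewer than 32 bytes remain at this point, so they are buffered in d.mem
+	// until the next Write or the final Sum64.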
+ copy(d.mem[:], b) + d.n = len(b) + + return +} + +// Sum appends the current hash to b and returns the resulting slice. +func (d *Digest) Sum(b []byte) []byte { + s := d.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +// Sum64 returns the current hash. +func (d *Digest) Sum64() uint64 { + var h uint64 + + if d.total >= 32 { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = d.v3 + prime5 + } + + h += d.total + + i, end := 0, d.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(d.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(d.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(d.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +const ( + magic = "xxh\x06" + marshaledSize = len(magic) + 8*5 + 32 +) + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d *Digest) MarshalBinary() ([]byte, error) { + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) + b = appendUint64(b, d.v1) + b = appendUint64(b, d.v2) + b = appendUint64(b, d.v3) + b = appendUint64(b, d.v4) + b = appendUint64(b, d.total) + b = append(b, d.mem[:d.n]...) + b = b[:len(b)+len(d.mem)-d.n] + return b, nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (d *Digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("xxhash: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("xxhash: invalid hash state size") + } + b = b[len(magic):] + b, d.v1 = consumeUint64(b) + b, d.v2 = consumeUint64(b) + b, d.v3 = consumeUint64(b) + b, d.v4 = consumeUint64(b) + b, d.total = consumeUint64(b) + copy(d.mem[:], b) + d.n = int(d.total % uint64(len(d.mem))) + return nil +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.LittleEndian.PutUint64(a[:], x) + return append(b, a[:]...) 
+} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := u64(b) + return b[8:], x +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s new file mode 100644 index 00000000..cea17856 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -0,0 +1,216 @@ +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +#include "textflag.h" + +// Register allocation: +// AX h +// SI pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// DI prime4v + +// round reads from and advances the buffer pointer in SI. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (SI), R12 \ + ADDQ $8, SI \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ DI, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), DI + + // Load slice. + MOVQ b_base+0(FP), SI + MOVQ b_len+8(FP), DX + LEAQ (SI)(DX*1), BX + + // The first loop limit will be len(b)-32. + SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until SI > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ SI, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. + ADDQ $24, BX + + CMPQ SI, BX + JG fourByte + +wordLoop: + // Calculate k1. 
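+	// Inline round(0, word): word *= prime2; rotate left 31; word *= prime1.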
+ MOVQ (SI), R8 + ADDQ $8, SI + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ DI, AX + + CMPQ SI, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ SI, BX + JG singles + + MOVL (SI), R8 + ADDQ $4, SI + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ SI, BX + JGE finalize + +singlesLoop: + MOVBQZX (SI), R12 + ADDQ $1, SI + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ SI, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the d pointer. + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT, $0-40 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), SI + MOVQ b_len+16(FP), DX + LEAQ (SI)(DX*1), BX + SUBQ $32, BX + + // Load vN from d. + MOVQ d+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ SI, BX + JLE blockLoop + + // Copy vN back to d. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // The number of bytes written is SI minus the old base pointer. + SUBQ b_base+8(FP), SI + MOVQ SI, ret+32(FP) + + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s new file mode 100644 index 00000000..4d64a17d --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -0,0 +1,186 @@ +// +build gc,!purego,!noasm + +#include "textflag.h" + +// Register allocation. +#define digest R1 +#define h R2 // Return value. +#define p R3 // Input pointer. +#define len R4 +#define nblocks R5 // len / 32. +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc \ + +// x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x \ + +#define mergeRound(x) \ + round0(x) \ + EOR x, h \ + MADD h, prime4, prime1, h \ + +// Update v[1-4] with 32-byte blocks. Assumes len >= 32. +#define blocksLoop() \ + LSR $5, len, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 32(p), (x1, x2) \ + round(v1, x1) \ + LDP -16(p), (x3, x4) \ + round(v2, x2) \ + SUB $1, nblocks \ + round(v3, x3) \ + round(v4, x4) \ + CBNZ nblocks, loop \ + +// The primes are repeated here to ensure that they're stored +// in a contiguous array, so we can load them with LDP. 
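+// LDP loads two adjacent 8-byte words into a pair of registers in a single
+// instruction.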
+DATA primes<> +0(SB)/8, $11400714785074694791 +DATA primes<> +8(SB)/8, $14029467366897019727 +DATA primes<>+16(SB)/8, $1609587929392839161 +DATA primes<>+24(SB)/8, $9650029242287828579 +DATA primes<>+32(SB)/8, $2870177450012600261 +GLOBL primes<>(SB), NOPTR+RODATA, $40 + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 + LDP b_base+0(FP), (p, len) + + LDP primes<> +0(SB), (prime1, prime2) + LDP primes<>+16(SB), (prime3, prime4) + MOVD primes<>+32(SB), prime5 + + CMP $32, len + CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } + BLO afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blocksLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(v1) + mergeRound(v2) + mergeRound(v3) + mergeRound(v4) + +afterLoop: + ADD len, h + + TBZ $4, len, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, len, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, len, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, len, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h + MUL prime1, h + +try1: + TBZ $0, len, end + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h + MUL prime1, h + +end: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +// +// Assumes len(b) >= 32. +TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40 + LDP primes<>(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, len) + + blocksLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, len + MOVD len, ret+32(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go new file mode 100644 index 00000000..1a1fac9c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -0,0 +1,16 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm +// +build amd64 arm64 +// +build !appengine +// +build gc +// +build !purego +// +build !noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +//go:noescape +func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go new file mode 100644 index 00000000..209cb4a9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -0,0 +1,77 @@ +//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm +// +build !amd64,!arm64 appengine !gc purego noasm + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. 
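+// This pure-Go fallback is compiled only when the build tags above rule out
+// the amd64/arm64 assembly implementations.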
+func Sum64(b []byte) uint64 { + // A simpler version would be + // d := New() + // d.Write(b) + // return d.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(d *Digest, b []byte) int { + v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 + n := len(b) + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 + return n - len(b) +} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go new file mode 100644 index 00000000..6f3b0cb1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go @@ -0,0 +1,11 @@ +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} + +// WriteString adds more data to d. It always returns len(s), nil. +func (d *Digest) WriteString(s string) (n int, err error) { + return d.Write([]byte(s)) +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go new file mode 100644 index 00000000..f833d154 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -0,0 +1,509 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "errors" + "fmt" + "io" +) + +type seq struct { + litLen uint32 + matchLen uint32 + offset uint32 + + // Codes are stored here for the encoder + // so they only have to be looked up once. + llCode, mlCode, ofCode uint8 +} + +type seqVals struct { + ll, ml, mo int +} + +func (s seq) String() string { + if s.offset <= 3 { + if s.offset == 0 { + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)") + } + return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)") +} + +type seqCompMode uint8 + +const ( + compModePredefined seqCompMode = iota + compModeRLE + compModeFSE + compModeRepeat +) + +type sequenceDec struct { + // decoder keeps track of the current state and updates it from the bitstream. 
+	fse    *fseDecoder
+	state  fseState
+	repeat bool
+}
+
+// init the state of the decoder with input from stream.
+func (s *sequenceDec) init(br *bitReader) error {
+	if s.fse == nil {
+		return errors.New("sequence decoder not defined")
+	}
+	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
+	return nil
+}
+
+// sequenceDecs contains all 3 decoders and their state.
+type sequenceDecs struct {
+	litLengths   sequenceDec
+	offsets      sequenceDec
+	matchLengths sequenceDec
+	prevOffset   [3]int
+	dict         []byte
+	literals     []byte
+	out          []byte
+	nSeqs        int
+	br           *bitReader
+	seqSize      int
+	windowSize   int
+	maxBits      uint8
+	maxSyncLen   uint64
+}
+
+// execute applies the decoded sequences to the output.
+func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
+	// Ensure we have enough output size...
+	if len(s.out)+s.seqSize > cap(s.out) {
+		addBytes := s.seqSize + len(s.out)
+		s.out = append(s.out, make([]byte, addBytes)...)
+		s.out = s.out[:len(s.out)-addBytes]
+	}
+
+	if debugDecoder {
+		printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
+	}
+
+	var t = len(s.out)
+	out := s.out[:t+s.seqSize]
+
+	for _, seq := range seqs {
+		// Add literals
+		copy(out[t:], s.literals[:seq.ll])
+		t += seq.ll
+		s.literals = s.literals[seq.ll:]
+
+		// Copy from dictionary...
+		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
+			if len(s.dict) == 0 {
+				return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
+			}
+
+			// we may be in dictionary.
+			dictO := len(s.dict) - (seq.mo - (t + len(hist)))
+			if dictO < 0 || dictO >= len(s.dict) {
+				return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
+			}
+			end := dictO + seq.ml
+			if end > len(s.dict) {
+				n := len(s.dict) - dictO
+				copy(out[t:], s.dict[dictO:])
+				t += n
+				seq.ml -= n
+			} else {
+				copy(out[t:], s.dict[dictO:end])
+				t += end - dictO
+				continue
+			}
+		}
+
+		// Copy from history.
+		if v := seq.mo - t; v > 0 {
+			// v is the start position in history from end.
+			start := len(hist) - v
+			if seq.ml > v {
+				// Some goes into current block.
+				// Copy remainder of history
+				copy(out[t:], hist[start:])
+				t += v
+				seq.ml -= v
+			} else {
+				copy(out[t:], hist[start:start+seq.ml])
+				t += seq.ml
+				continue
+			}
+		}
+		// We must be in current buffer now
+		if seq.ml > 0 {
+			start := t - seq.mo
+			if seq.ml <= t-start {
+				// No overlap
+				copy(out[t:], out[start:start+seq.ml])
+				t += seq.ml
+				continue
+			} else {
+				// Overlapping copy
+				// Extend destination slice and copy one byte at the time.
+				src := out[start : start+seq.ml]
+				dst := out[t:]
+				dst = dst[:len(src)]
+				t += len(src)
+				// Destination is the space we just added.
+				for i := range src {
+					dst[i] = src[i]
+				}
+			}
+		}
+	}
+
+	// Add final literals
+	copy(out[t:], s.literals)
+	if debugDecoder {
+		t += len(s.literals)
+		if t != len(out) {
+			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
+		}
+	}
+	s.out = out
+
+	return nil
+}
+
+// decode sequences from the stream with the provided history.
+func (s *sequenceDecs) decodeSync(hist []byte) error {
+	supported, err := s.decodeSyncSimple(hist)
+	if supported {
+		return err
+	}
+
+	br := s.br
+	seqs := s.nSeqs
+	startSize := len(s.out)
+	// Grab full sizes tables, to avoid bounds checks.
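+	// The tables get a compile-time constant length here, and every later
+	// lookup is masked with maxTableMask, so the compiler can drop the
+	// per-lookup bounds checks inside the hot loop.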
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + out := s.out + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + for i := seqs - 1; i >= 0; i-- { + if br.overread() { + printf("reading sequence %d, exceeded available data\n", seqs-i) + return io.ErrUnexpectedEOF + } + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + + if ll > len(s.literals) { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) + } + size := ll + ml + len(out) + if size-startSize > maxBlockSize { + if size-startSize == 424242 { + panic("here") + } + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + if size > cap(out) { + // Not enough size, which can happen under high volume block streaming conditions + // but could be if destination slice is too small for sync operations. + // over-allocating here can create a large amount of GC pressure so we try to keep + // it as contained as possible + used := len(out) - startSize + addBytes := 256 + ll + ml + used>>2 + // Clamp to max block size. + if used+addBytes > maxBlockSize { + addBytes = maxBlockSize - used + } + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + + // Add literals + out = append(out, s.literals[:ll]...) + s.literals = s.literals[ll:] + + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + + if mo > len(out)+len(hist) || mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + + // we may be in dictionary. 
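+			// Whatever part of mo reaches past len(out)+len(hist) must come
+			// from the tail of the dictionary, so index it from the dict's end.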
+ dictO := len(s.dict) - (mo - (len(out) + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize) + } + end := dictO + ml + if end > len(s.dict) { + out = append(out, s.dict[dictO:]...) + ml -= len(s.dict) - dictO + } else { + out = append(out, s.dict[dictO:end]...) + mo = 0 + ml = 0 + } + } + + // Copy from history. + // TODO: Blocks without history could be made to ignore this completely. + if v := mo - len(out); v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if ml > v { + // Some goes into current block. + // Copy remainder of history + out = append(out, hist[start:]...) + ml -= v + } else { + out = append(out, hist[start:start+ml]...) + ml = 0 + } + } + // We must be in current buffer now + if ml > 0 { + start := len(out) - mo + if ml <= len(out)-start { + // No overlap + out = append(out, out[start:start+ml]...) + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + out = out[:len(out)+ml] + src := out[start : start+ml] + // Destination is the space we just added. + dst := out[len(out)-ml:] + dst = dst[:len(src)] + for i := range src { + dst[i] = src[i] + } + } + } + if i == 0 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + + // Check if space for literals + if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + + // Add final literals + s.out = append(out, s.literals...) + return br.close() +} + +var bitMask [16]uint16 + +func init() { + for i := range bitMask[:] { + bitMask[i] = uint16((1 << uint(i)) - 1) + } +} + +func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { + // Final will not read from stream. + ll, llB := llState.final() + ml, mlB := mlState.final() + mo, moB := ofState.final() + + // extra bits are stored in reverse order. + br.fill() + if s.maxBits <= 32 { + mo += br.getBits(moB) + ml += br.getBits(mlB) + ll += br.getBits(llB) + } else { + mo += br.getBits(moB) + br.fill() + // matchlength+literal length, max 32 bits + ml += br.getBits(mlB) + ll += br.getBits(llB) + + } + mo = s.adjustOffset(mo, ll, moB) + return +} + +func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { + if offsetB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = offset + return offset + } + + if litLen == 0 { + // There is an exception though, when current sequence's literals_length = 0. 
+ // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + offset++ + } + + if offset == 0 { + return s.prevOffset[0] + } + var temp int + if offset == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[offset] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("temp was 0") + temp = 1 + } + + if offset != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + return temp +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go new file mode 100644 index 00000000..191384ad --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -0,0 +1,379 @@ +//go:build amd64 && !appengine && !noasm && gc +// +build amd64,!appengine,!noasm,gc + +package zstd + +import ( + "fmt" + + "github.com/klauspost/compress/internal/cpuinfo" +) + +type decodeSyncAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + litRemain int + out []byte + outPosition int + literals []byte + litPosition int + history []byte + windowSize int + ll int // set on error (not for all errors, please refer to _generate/gen.go) + ml int // set on error (not for all errors, please refer to _generate/gen.go) + mo int // set on error (not for all errors, please refer to _generate/gen.go) +} + +// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer. +// +//go:noescape +func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int + +// decode sequences from the stream with the provided history but without a dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + if len(s.dict) > 0 { + return false, nil + } + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { + return false, nil + } + + // FIXME: Using unsafe memory copies leads to rare, random crashes + // with fuzz testing. It is therefore disabled for now. 
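+	// The "safe" variants chosen below never write past the output slice;
+	// the unsafe ones may spill into the compressedBlockOverAlloc padding
+	// for speed.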
+ const useSafe = true + /* + useSafe := false + if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { + useSafe = true + } + if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { + useSafe = true + } + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + useSafe = true + } + */ + + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeSyncAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + iteration: s.nSeqs - 1, + litRemain: len(s.literals), + out: s.out, + outPosition: len(s.out), + literals: s.literals, + windowSize: s.windowSize, + history: hist, + } + + s.seqSize = 0 + startSize := len(s.out) + + var errCode int + if cpuinfo.HasBMI2() { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) + } + } else { + if useSafe { + errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) + } + } + switch errCode { + case noError: + break + + case errorMatchLenOfsMismatch: + return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) + + case errorMatchLenTooBig: + return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) + + case errorMatchOffTooBig: + return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", + ctx.mo, ctx.outPosition+len(hist)-startSize) + + case errorNotEnoughLiterals: + return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", + ctx.ll, ctx.litRemain+ctx.ll) + + case errorNotEnoughSpace: + size := ctx.outPosition + ctx.ll + ctx.ml + if debugDecoder { + println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) + } + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + default: + return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + return true, err + } + + s.literals = s.literals[ctx.litPosition:] + t := ctx.outPosition + s.out = s.out[:t] + + // Add final literals + s.out = append(s.out, s.literals...) 
+ if debugDecoder { + t += len(s.literals) + if t != len(s.out) { + panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t)) + } + } + + return true, nil +} + +// -------------------------------------------------------------------------------- + +type decodeAsmContext struct { + llTable []decSymbol + mlTable []decSymbol + ofTable []decSymbol + llState uint64 + mlState uint64 + ofState uint64 + iteration int + seqs []seqVals + litRemain int +} + +const noError = 0 + +// error reported when mo == 0 && ml > 0 +const errorMatchLenOfsMismatch = 1 + +// error reported when ml > maxMatchLen +const errorMatchLenTooBig = 2 + +// error reported when mo > available history or mo > s.windowSize +const errorMatchOffTooBig = 3 + +// error reported when the sum of literal lengths exeeceds the literal buffer size +const errorNotEnoughLiterals = 4 + +// error reported when capacity of `out` is too small +const errorNotEnoughSpace = 5 + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions. +// +//go:noescape +func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int + +// decode sequences from the stream without the provided history. 
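+// The decoded values are only stored in seqs here; they are applied to the
+// output afterwards in a separate execute step.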
+func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + + ctx := decodeAsmContext{ + llTable: s.litLengths.fse.dt[:maxTablesize], + mlTable: s.matchLengths.fse.dt[:maxTablesize], + ofTable: s.offsets.fse.dt[:maxTablesize], + llState: uint64(s.litLengths.state.state), + mlState: uint64(s.matchLengths.state.state), + ofState: uint64(s.offsets.state.state), + seqs: seqs, + iteration: len(seqs) - 1, + litRemain: len(s.literals), + } + + s.seqSize = 0 + lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 + var errCode int + if cpuinfo.HasBMI2() { + if lte56bits { + errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_bmi2(s, br, &ctx) + } + } else { + if lte56bits { + errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) + } else { + errCode = sequenceDecs_decode_amd64(s, br, &ctx) + } + } + if errCode != 0 { + i := len(seqs) - ctx.iteration - 1 + switch errCode { + case errorMatchLenOfsMismatch: + ml := ctx.seqs[i].ml + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + + case errorMatchLenTooBig: + ml := ctx.seqs[i].ml + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + + case errorNotEnoughLiterals: + ll := ctx.seqs[i].ll + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) + } + + return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + } + + if ctx.litRemain < 0 { + return fmt.Errorf("literal count is too big: total available %d, total requested %d", + len(s.literals), len(s.literals)-ctx.litRemain) + } + + s.seqSize += ctx.litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// -------------------------------------------------------------------------------- + +type executeAsmContext struct { + seqs []seqVals + seqIndex int + out []byte + history []byte + literals []byte + outPosition int + litPosition int + windowSize int +} + +// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. +// +// Returns false if a match offset is too big. +// +// Please refer to seqdec_generic.go for the reference implementation. +// +//go:noescape +func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool + +// Same as above, but with safe memcopies +// +//go:noescape +func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool + +// executeSimple handles cases when dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { + addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc + s.out = append(s.out, make([]byte, addBytes)...) 
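+		// The append above only forces the backing array to grow; the next
+		// line trims the length back so the new bytes remain spare capacity.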
+ s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + ctx := executeAsmContext{ + seqs: seqs, + seqIndex: 0, + out: out, + history: hist, + outPosition: t, + litPosition: 0, + literals: s.literals, + windowSize: s.windowSize, + } + var ok bool + if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { + ok = sequenceDecs_executeSimple_safe_amd64(&ctx) + } else { + ok = sequenceDecs_executeSimple_amd64(&ctx) + } + if !ok { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", + seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) + } + s.literals = s.literals[ctx.litPosition:] + t = ctx.outPosition + + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s new file mode 100644 index 00000000..52e5703c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -0,0 +1,4099 @@ +// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. + +//go:build !appengine && !noasm && gc && !noasm + +// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_end + +sequenceDecs_decode_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_byte_by_byte + +sequenceDecs_decode_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decode_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_amd64_fill_2_end + +sequenceDecs_decode_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte + +sequenceDecs_decode_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + 
MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_amd64_adjust_offset_nonzero + +sequenceDecs_decode_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_amd64_after_adjust + +sequenceDecs_decode_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_amd64_adjust_zero + JEQ sequenceDecs_decode_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_amd64_adjust_three + JMP sequenceDecs_decode_amd64_adjust_two + +sequenceDecs_decode_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_amd64_adjust_test_temp_valid + +sequenceDecs_decode_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: CMOV +TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + MOVQ 104(AX), R10 + MOVQ s+0(FP), AX + MOVQ 144(AX), R11 + MOVQ 152(AX), R12 + MOVQ 160(AX), R13 + +sequenceDecs_decode_56_amd64_main_loop: + MOVQ (SP), R14 + + // Fill bitreader to have enough for the offset 
and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decode_56_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R14 + MOVQ (R14), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decode_56_amd64_fill_end + +sequenceDecs_decode_56_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decode_56_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decode_56_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R14 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R14), AX + ORQ AX, DX + JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte + +sequenceDecs_decode_56_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_of_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_of_update_zero: + MOVQ AX, 16(R10) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ml_update_zero: + MOVQ AX, 8(R10) + + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R15 + SHLQ CL, R15 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decode_56_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decode_56_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decode_56_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R15 + ADDQ R15, AX + +sequenceDecs_decode_56_amd64_ll_update_zero: + MOVQ AX, (R10) + + // Fill bitreader for state updates + MOVQ R14, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R14 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R14 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R14 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R14*1), CX + MOVQ DX, R15 + MOVQ CX, BX + ROLQ CL, R15 + MOVL $0x00000001, BP + MOVB R14, CL + SHLL CL, BP + DECL BP + ANDQ BP, R15 + ADDQ R15, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decode_56_amd64_skip_update: + // Adjust offset + MOVQ 16(R10), CX + CMPQ AX, $0x01 + JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 + MOVQ R12, R13 + MOVQ R11, R12 + MOVQ CX, R11 + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: + CMPQ (R10), $0x00000000 + JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero + 
+sequenceDecs_decode_56_amd64_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero + MOVQ R11, CX + JMP sequenceDecs_decode_56_amd64_after_adjust + +sequenceDecs_decode_56_amd64_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_amd64_adjust_zero + JEQ sequenceDecs_decode_56_amd64_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_amd64_adjust_three + JMP sequenceDecs_decode_56_amd64_adjust_two + +sequenceDecs_decode_56_amd64_adjust_zero: + MOVQ R11, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_one: + MOVQ R12, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_two: + MOVQ R13, AX + JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid + +sequenceDecs_decode_56_amd64_adjust_three: + LEAQ -1(R11), AX + +sequenceDecs_decode_56_amd64_adjust_test_temp_valid: + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid + MOVQ $0x00000001, AX + +sequenceDecs_decode_56_amd64_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R12, R13 + MOVQ R11, R12 + MOVQ AX, R11 + MOVQ AX, CX + +sequenceDecs_decode_56_amd64_after_adjust: + MOVQ CX, 16(R10) + + // Check values + MOVQ 8(R10), AX + MOVQ (R10), R14 + LEAQ (AX)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decode_56_amd64_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_amd64_match_len_ofs_ok: + ADDQ $0x18, R10 + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decode_56_amd64_main_loop + MOVQ s+0(FP), AX + MOVQ R11, 144(AX) + MOVQ R12, 152(AX) + MOVQ R13, 160(AX) + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_amd64_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_end + +sequenceDecs_decode_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_byte_by_byte + +sequenceDecs_decode_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_bmi2_fill_2_end + +sequenceDecs_decode_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte + +sequenceDecs_decode_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_bmi2_after_adjust + +sequenceDecs_decode_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_bmi2_adjust_zero + JEQ 
sequenceDecs_decode_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_bmi2_adjust_three + JMP sequenceDecs_decode_bmi2_adjust_two + +sequenceDecs_decode_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_bmi2_adjust_three: + LEAQ -1(R10), R13 + +sequenceDecs_decode_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int +// Requires: BMI, BMI2, CMOV +TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + MOVQ 104(CX), R9 + MOVQ s+0(FP), CX + MOVQ 144(CX), R10 + MOVQ 152(CX), R11 + MOVQ 160(CX), R12 + +sequenceDecs_decode_56_bmi2_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
+ CMPQ BX, $0x08 + JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R13 + MOVQ (R13), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decode_56_bmi2_fill_end + +sequenceDecs_decode_56_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decode_56_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decode_56_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R13 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R13), CX + ORQ CX, AX + JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte + +sequenceDecs_decode_56_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 16(R9) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, 8(R9) + + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R14 + MOVQ AX, R15 + LEAQ (DX)(R14*1), CX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R15, CX + MOVQ CX, (R9) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decode_56_bmi2_skip_update + LEAQ (SI)(DI*1), R14 + ADDQ R8, R14 + MOVBQZX R14, R14 + LEAQ (DX)(R14*1), CX + MOVQ AX, R15 + MOVQ CX, DX + ROLQ CL, R15 + BZHIQ R14, R15, R15 + + // Update Offset State + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + MOVQ $0x00001010, R14 + BEXTRQ R14, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R15, CX + MOVQ $0x00001010, R14 + BEXTRQ R14, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decode_56_bmi2_skip_update: + // Adjust offset + MOVQ 16(R9), CX + CMPQ R13, $0x01 + JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 + MOVQ R11, R12 + MOVQ R10, R11 + MOVQ CX, R10 + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: + CMPQ (R9), $0x00000000 + JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero + INCQ CX + JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + +sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero + MOVQ R10, CX + JMP sequenceDecs_decode_56_bmi2_after_adjust + +sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: + CMPQ CX, $0x01 + JB sequenceDecs_decode_56_bmi2_adjust_zero + JEQ sequenceDecs_decode_56_bmi2_adjust_one + CMPQ CX, $0x02 + JA sequenceDecs_decode_56_bmi2_adjust_three + JMP sequenceDecs_decode_56_bmi2_adjust_two + +sequenceDecs_decode_56_bmi2_adjust_zero: + MOVQ R10, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_one: + MOVQ R11, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_two: + MOVQ R12, R13 + JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid + +sequenceDecs_decode_56_bmi2_adjust_three: + LEAQ -1(R10), R13 + 
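+ // A repeat offset of 0 is not valid (corrupt input); force it to 1 so
+ // decoding can continue.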
+sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid + MOVQ $0x00000001, R13 + +sequenceDecs_decode_56_bmi2_adjust_temp_valid: + CMPQ CX, $0x01 + CMOVQNE R11, R12 + MOVQ R10, R11 + MOVQ R13, R10 + MOVQ R13, CX + +sequenceDecs_decode_56_bmi2_after_adjust: + MOVQ CX, 16(R9) + + // Check values + MOVQ 8(R9), R13 + MOVQ (R9), R14 + LEAQ (R13)(R14*1), R15 + MOVQ s+0(FP), BP + ADDQ R15, 256(BP) + MOVQ ctx+16(FP), R15 + SUBQ R14, 128(R15) + JS error_not_enough_literals + CMPQ R13, $0x00020002 + JA sequenceDecs_decode_56_bmi2_error_match_len_too_big + TESTQ CX, CX + JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok + TESTQ R13, R13 + JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decode_56_bmi2_match_len_ofs_ok: + ADDQ $0x18, R9 + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decode_56_bmi2_main_loop + MOVQ s+0(FP), CX + MOVQ R10, 144(CX) + MOVQ R11, 152(CX) + MOVQ R12, 160(CX) + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decode_56_bmi2_error_match_len_too_big: + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (SI)(R14*1), X0 + MOVUPS X0, (BX)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, R11 + JB copy_1 + ADDQ R11, SI + ADDQ R11, BX + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + 
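+ // 8 to 16 bytes: copy the first and last 8 bytes; the two stores may
+ // overlap when the length is below 16.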
+copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ BX, R12 + ADDQ R13, BX + +copy_2: + MOVUPS (R11), X0 + MOVUPS X0, (R12) + ADDQ $0x10, R11 + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool +// Requires: SSE +TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 + MOVQ ctx+0(FP), R10 + MOVQ 8(R10), CX + TESTQ CX, CX + JZ empty_seqs + MOVQ (R10), AX + MOVQ 24(R10), DX + MOVQ 32(R10), BX + MOVQ 80(R10), SI + MOVQ 104(R10), DI + MOVQ 120(R10), R8 + MOVQ 56(R10), R9 + MOVQ 64(R10), R10 + ADDQ R10, R9 + + // seqsBase += 24 * seqIndex + LEAQ (DX)(DX*2), R11 + SHLQ $0x03, R11 + ADDQ R11, AX + + // outBase += outPosition + ADDQ DI, BX + +main_loop: + MOVQ (AX), R11 + MOVQ 16(AX), R12 + MOVQ 8(AX), R13 + + // Copy literals + TESTQ R11, R11 + JZ check_offset + MOVQ R11, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (SI), X0 + MOVUPS X0, (BX) + ADDQ $0x10, SI + ADDQ $0x10, BX + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(SI)(R14*1), SI + LEAQ 16(BX)(R14*1), BX + MOVUPS -16(SI), X0 + MOVUPS X0, -16(BX) + JMP copy_1_end + +copy_1_small: + CMPQ R11, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ R11, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (SI), R14 + 
MOVB -1(SI)(R11*1), R15 + MOVB R14, (BX) + MOVB R15, -1(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_3: + MOVW (SI), R14 + MOVB 2(SI), R15 + MOVW R14, (BX) + MOVB R15, 2(BX) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_4through7: + MOVL (SI), R14 + MOVL -4(SI)(R11*1), R15 + MOVL R14, (BX) + MOVL R15, -4(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (SI), R14 + MOVQ -8(SI)(R11*1), R15 + MOVQ R14, (BX) + MOVQ R15, -8(BX)(R11*1) + ADDQ R11, SI + ADDQ R11, BX + +copy_1_end: + ADDQ R11, DI + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + LEAQ (DI)(R10*1), R11 + CMPQ R12, R11 + JG error_match_off_too_big + CMPQ R12, R8 + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, R11 + SUBQ DI, R11 + JLS copy_match + MOVQ R9, R14 + SUBQ R11, R14 + CMPQ R13, R11 + JG copy_all_from_history + MOVQ R13, R11 + SUBQ $0x10, R11 + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R11 + JAE copy_4_loop + LEAQ 16(R14)(R11*1), R14 + LEAQ 16(BX)(R11*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), R11 + MOVB 2(R14), R12 + MOVW R11, (BX) + MOVB R12, 2(BX) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), R11 + MOVL -4(R14)(R13*1), R12 + MOVL R11, (BX) + MOVL R12, -4(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), R11 + MOVQ -8(R14)(R13*1), R12 + MOVQ R11, (BX) + MOVQ R12, -8(BX)(R13*1) + ADDQ R13, R14 + ADDQ R13, BX + +copy_4_end: + ADDQ R13, DI + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + JMP loop_finished + +copy_all_from_history: + MOVQ R11, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R14 + ADDQ $0x10, BX + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(BX)(R15*1), BX + MOVUPS -16(R14), X0 + MOVUPS X0, -16(BX) + JMP copy_5_end + +copy_5_small: + CMPQ R11, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ R11, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(R11*1), BP + MOVB R15, (BX) + MOVB BP, -1(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (BX) + MOVB BP, 2(BX) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(R11*1), BP + MOVL R15, (BX) + MOVL BP, -4(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(R11*1), BP + MOVQ R15, (BX) + MOVQ BP, -8(BX)(R11*1) + ADDQ R11, R14 + ADDQ R11, BX + +copy_5_end: + ADDQ R11, DI + SUBQ R11, R13 + + // Copy match from the current buffer +copy_match: + MOVQ BX, R11 + SUBQ R12, R11 + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, DI + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (R11), X0 + MOVUPS X0, (BX) + ADDQ $0x10, R11 + ADDQ $0x10, BX + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(R11)(R12*1), R11 + LEAQ 16(BX)(R12*1), BX + MOVUPS -16(R11), X0 + MOVUPS X0, -16(BX) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB 
copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (R11), R12 + MOVB -1(R11)(R13*1), R14 + MOVB R12, (BX) + MOVB R14, -1(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_3: + MOVW (R11), R12 + MOVB 2(R11), R14 + MOVW R12, (BX) + MOVB R14, 2(BX) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_4through7: + MOVL (R11), R12 + MOVL -4(R11)(R13*1), R14 + MOVL R12, (BX) + MOVL R14, -4(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (R11), R12 + MOVQ -8(R11)(R13*1), R14 + MOVQ R12, (BX) + MOVQ R14, -8(BX)(R13*1) + ADDQ R13, R11 + ADDQ R13, BX + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, DI + +copy_slow_3: + MOVB (R11), R12 + MOVB R12, (BX) + INCQ R11 + INCQ BX + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + ADDQ $0x18, AX + INCQ DX + CMPQ DX, CX + JB main_loop + +loop_finished: + // Return value + MOVB $0x01, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +error_match_off_too_big: + // Return value + MOVB $0x00, ret+8(FP) + + // Update the context + MOVQ ctx+0(FP), AX + MOVQ DX, 24(AX) + MOVQ DI, 104(AX) + MOVQ 80(AX), CX + SUBQ CX, SI + MOVQ SI, 112(AX) + RET + +empty_seqs: + // Return value + MOVB $0x01, ret+8(FP) + RET + +// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_amd64_main_loop: + MOVQ (SP), R13 + + // Fill bitreader to have enough for the offset and match length. 
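+ // Note: decodeSync interleaves decode and execute; each iteration of this
+ // loop decodes one sequence and immediately copies its literals and match
+ // into s.out.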
+ CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_end + +sequenceDecs_decodeSync_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_amd64_fill_2_end + +sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL 
$0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_amd64_after_adjust + +sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R11)(R14*1), X0 + MOVUPS X0, (R10)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, AX + JB copy_1 + ADDQ AX, R11 + ADDQ AX, R10 + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, 
-8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R10, CX + ADDQ R13, R10 + +copy_2: + MOVUPS (AX), X0 + MOVUPS X0, (CX) + ADDQ $0x10, AX + ADDQ $0x10, CX + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), 
R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_end + +sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_bmi2_fill_2_end + +sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 
144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_bmi2_after_adjust + +sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + XORQ R14, R14 + +copy_1: + MOVUPS (R10)(R14*1), X0 + MOVUPS X0, (R9)(R14*1) + ADDQ $0x10, R14 + CMPQ R14, CX + JB copy_1 + ADDQ CX, R10 + ADDQ CX, R9 + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) 
+ JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R9, R12 + ADDQ R13, R9 + +copy_2: + MOVUPS (CX), X0 + MOVUPS X0, (R12) + ADDQ $0x10, CX + ADDQ $0x10, R12 + SUBQ $0x10, R13 + JHI copy_2 + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 + MOVQ br+8(FP), AX + MOVQ 32(AX), DX + MOVBQZX 40(AX), BX + MOVQ 24(AX), SI + MOVQ (AX), AX + ADDQ SI, AX + MOVQ AX, (SP) + MOVQ ctx+16(FP), AX + MOVQ 72(AX), DI + MOVQ 80(AX), R8 + MOVQ 88(AX), R9 + XORQ CX, CX + MOVQ CX, 8(SP) + MOVQ CX, 16(SP) + MOVQ CX, 24(SP) + MOVQ 112(AX), R10 + MOVQ 128(AX), CX + MOVQ CX, 32(SP) + MOVQ 144(AX), R11 + MOVQ 136(AX), R12 + MOVQ 200(AX), CX + MOVQ CX, 56(SP) + MOVQ 176(AX), CX + MOVQ CX, 48(SP) + MOVQ 184(AX), AX + MOVQ AX, 40(SP) + MOVQ 40(SP), AX + ADDQ AX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R10, 32(SP) + + // outBase += outPosition + ADDQ R12, R10 + +sequenceDecs_decodeSync_safe_amd64_main_loop: + MOVQ (SP), R13 + + 
// Fill bitreader to have enough for the offset and match length. + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_end + +sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_end: + // Update offset + MOVQ R9, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_of_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_of_update_zero: + MOVQ AX, 8(SP) + + // Update match length + MOVQ R8, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ml_update_zero: + MOVQ AX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ SI, $0x08 + JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + MOVQ BX, AX + SHRQ $0x03, AX + SUBQ AX, R13 + MOVQ (R13), DX + SUBQ AX, SI + ANDQ $0x07, BX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end + +sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: + CMPQ SI, $0x00 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + CMPQ BX, $0x07 + JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end + SHLQ $0x08, DX + SUBQ $0x01, R13 + SUBQ $0x01, SI + SUBQ $0x08, BX + MOVBQZX (R13), AX + ORQ AX, DX + JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_amd64_fill_2_end: + // Update literal length + MOVQ DI, AX + MOVQ BX, CX + MOVQ DX, R14 + SHLQ CL, R14 + MOVB AH, CL + SHRQ $0x20, AX + TESTQ CX, CX + JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero + ADDQ CX, BX + CMPQ BX, $0x40 + JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero + CMPQ CX, $0x40 + JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero + NEGQ CX + SHRQ CL, R14 + ADDQ R14, AX + +sequenceDecs_decodeSync_safe_amd64_ll_update_zero: + MOVQ AX, 24(SP) + + // Fill bitreader for state updates + MOVQ R13, (SP) + MOVQ R9, AX + SHRQ $0x08, AX + MOVBQZX AL, AX + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_amd64_skip_update + + // Update Literal Length State + MOVBQZX DI, R13 + SHRQ $0x10, DI + MOVWQZX DI, DI + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, DI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(DI*8), DI + + // Update Match Length State + MOVBQZX R8, R13 + SHRQ $0x10, R8 + MOVWQZX R8, R8 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R8 + + // Load ctx.mlTable + MOVQ 
ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Offset State + MOVBQZX R9, R13 + SHRQ $0x10, R9 + MOVWQZX R9, R9 + LEAQ (BX)(R13*1), CX + MOVQ DX, R14 + MOVQ CX, BX + ROLQ CL, R14 + MOVL $0x00000001, R15 + MOVB R13, CL + SHLL CL, R15 + DECL R15 + ANDQ R15, R14 + ADDQ R14, R9 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R9*8), R9 + +sequenceDecs_decodeSync_safe_amd64_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ AX, $0x01 + JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_amd64_after_adjust + +sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: + MOVQ R13, AX + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, AX + CMOVQEQ R15, R14 + ADDQ 144(CX)(AX*8), R14 + JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip + MOVQ 152(CX), AX + MOVQ AX, 160(CX) + +sequenceDecs_decodeSync_safe_amd64_adjust_skip: + MOVQ 144(CX), AX + MOVQ AX, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_amd64_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), AX + MOVQ 24(SP), CX + LEAQ (AX)(CX*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ CX, 104(R14) + JS error_not_enough_literals + CMPQ AX, $0x00020002 + JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok + TESTQ AX, AX + JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: + MOVQ 24(SP), AX + MOVQ 8(SP), CX + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (AX)(R13*1), R14 + ADDQ R10, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ AX, AX + JZ check_offset + MOVQ AX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R11), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R11 + ADDQ $0x10, R10 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R11)(R14*1), R11 + LEAQ 16(R10)(R14*1), R10 + MOVUPS -16(R11), X0 + MOVUPS X0, -16(R10) + JMP copy_1_end + +copy_1_small: + CMPQ AX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ AX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R11), R14 + MOVB -1(R11)(AX*1), R15 + MOVB R14, (R10) + MOVB R15, -1(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_3: + MOVW (R11), R14 + MOVB 2(R11), R15 + MOVW R14, (R10) + MOVB R15, 2(R10) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R11), R14 + MOVL -4(R11)(AX*1), R15 + MOVL R14, (R10) + MOVL R15, -4(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R11), R14 + MOVQ -8(R11)(AX*1), R15 + MOVQ R14, (R10) + MOVQ R15, -8(R10)(AX*1) + ADDQ AX, R11 + ADDQ AX, R10 + 
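+ // Literals copied; advance the output position, then validate the match
+ // offset.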
+copy_1_end: + ADDQ AX, R12 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R12, AX + ADDQ 40(SP), AX + CMPQ CX, AX + JG error_match_off_too_big + CMPQ CX, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ CX, AX + SUBQ R12, AX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ AX, R14 + CMPQ R13, AX + JG copy_all_from_history + MOVQ R13, AX + SUBQ $0x10, AX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, AX + JAE copy_4_loop + LEAQ 16(R14)(AX*1), R14 + LEAQ 16(R10)(AX*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), AX + MOVB 2(R14), CL + MOVW AX, (R10) + MOVB CL, 2(R10) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), AX + MOVL -4(R14)(R13*1), CX + MOVL AX, (R10) + MOVL CX, -4(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), AX + MOVQ -8(R14)(R13*1), CX + MOVQ AX, (R10) + MOVQ CX, -8(R10)(R13*1) + ADDQ R13, R14 + ADDQ R13, R10 + +copy_4_end: + ADDQ R13, R12 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ AX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R10) + ADDQ $0x10, R14 + ADDQ $0x10, R10 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R10)(R15*1), R10 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R10) + JMP copy_5_end + +copy_5_small: + CMPQ AX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ AX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(AX*1), BP + MOVB R15, (R10) + MOVB BP, -1(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R10) + MOVB BP, 2(R10) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(AX*1), BP + MOVL R15, (R10) + MOVL BP, -4(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(AX*1), BP + MOVQ R15, (R10) + MOVQ BP, -8(R10)(AX*1) + ADDQ AX, R14 + ADDQ AX, R10 + +copy_5_end: + ADDQ AX, R12 + SUBQ AX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R10, AX + SUBQ CX, AX + + // ml <= mo + CMPQ R13, CX + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R12 + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_2_small + +copy_2_loop: + MOVUPS (AX), X0 + MOVUPS X0, (R10) + ADDQ $0x10, AX + ADDQ $0x10, R10 + SUBQ $0x10, CX + JAE copy_2_loop + LEAQ 16(AX)(CX*1), AX + LEAQ 16(R10)(CX*1), R10 + MOVUPS -16(AX), X0 + MOVUPS X0, -16(R10) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (AX), CL + MOVB -1(AX)(R13*1), R14 + MOVB CL, (R10) + MOVB R14, -1(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_3: + MOVW (AX), CX + MOVB 2(AX), R14 + MOVW CX, (R10) + MOVB R14, 2(R10) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (AX), CX + MOVL -4(AX)(R13*1), R14 + MOVL CX, (R10) + MOVL R14, -4(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (AX), CX + MOVQ 
-8(AX)(R13*1), R14 + MOVQ CX, (R10) + MOVQ R14, -8(R10)(R13*1) + ADDQ R13, AX + ADDQ R13, R10 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R12 + +copy_slow_3: + MOVB (AX), CL + MOVB CL, (R10) + INCQ AX + INCQ R10 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), AX + DECQ 96(AX) + JNS sequenceDecs_decodeSync_safe_amd64_main_loop + +loop_finished: + MOVQ br+8(FP), AX + MOVQ DX, 32(AX) + MOVB BL, 40(AX) + MOVQ SI, 24(AX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R12, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R11 + MOVQ R11, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R12, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET + +// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int +// Requires: BMI, BMI2, CMOV, SSE +TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 + MOVQ br+8(FP), CX + MOVQ 32(CX), AX + MOVBQZX 40(CX), DX + MOVQ 24(CX), BX + MOVQ (CX), CX + ADDQ BX, CX + MOVQ CX, (SP) + MOVQ ctx+16(FP), CX + MOVQ 72(CX), SI + MOVQ 80(CX), DI + MOVQ 88(CX), R8 + XORQ R9, R9 + MOVQ R9, 8(SP) + MOVQ R9, 16(SP) + MOVQ R9, 24(SP) + MOVQ 112(CX), R9 + MOVQ 128(CX), R10 + MOVQ R10, 32(SP) + MOVQ 144(CX), R10 + MOVQ 136(CX), R11 + MOVQ 200(CX), R12 + MOVQ R12, 56(SP) + MOVQ 176(CX), R12 + MOVQ R12, 48(SP) + MOVQ 184(CX), CX + MOVQ CX, 40(SP) + MOVQ 40(SP), CX + ADDQ CX, 48(SP) + + // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + ADDQ R9, 32(SP) + + // outBase += outPosition + ADDQ R11, R9 + +sequenceDecs_decodeSync_safe_bmi2_main_loop: + MOVQ (SP), R12 + + // Fill bitreader to have enough for the offset and match length. 
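+ // "safe" variant: the literal and match copies below handle tails with
+ // exact-length moves and never read or write past the requested lengths.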
+ CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_end + +sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_end: + // Update offset + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ R8, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 8(SP) + + // Update match length + MOVQ $0x00000808, CX + BEXTRQ CX, DI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ DI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 16(SP) + + // Fill bitreader to have enough for the remaining + CMPQ BX, $0x08 + JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + MOVQ DX, CX + SHRQ $0x03, CX + SUBQ CX, R12 + MOVQ (R12), AX + SUBQ CX, BX + ANDQ $0x07, DX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end + +sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: + CMPQ BX, $0x00 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + CMPQ DX, $0x07 + JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end + SHLQ $0x08, AX + SUBQ $0x01, R12 + SUBQ $0x01, BX + SUBQ $0x08, DX + MOVBQZX (R12), CX + ORQ CX, AX + JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte + +sequenceDecs_decodeSync_safe_bmi2_fill_2_end: + // Update literal length + MOVQ $0x00000808, CX + BEXTRQ CX, SI, R13 + MOVQ AX, R14 + LEAQ (DX)(R13*1), CX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + MOVQ CX, DX + MOVQ SI, CX + SHRQ $0x20, CX + ADDQ R14, CX + MOVQ CX, 24(SP) + + // Fill bitreader for state updates + MOVQ R12, (SP) + MOVQ $0x00000808, CX + BEXTRQ CX, R8, R12 + MOVQ ctx+16(FP), CX + CMPQ 96(CX), $0x00 + JZ sequenceDecs_decodeSync_safe_bmi2_skip_update + LEAQ (SI)(DI*1), R13 + ADDQ R8, R13 + MOVBQZX R13, R13 + LEAQ (DX)(R13*1), CX + MOVQ AX, R14 + MOVQ CX, DX + ROLQ CL, R14 + BZHIQ R13, R14, R14 + + // Update Offset State + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, R8, R8 + ADDQ CX, R8 + + // Load ctx.ofTable + MOVQ ctx+16(FP), CX + MOVQ 48(CX), CX + MOVQ (CX)(R8*8), R8 + + // Update Match Length State + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + MOVQ $0x00001010, R13 + BEXTRQ R13, DI, DI + ADDQ CX, DI + + // Load ctx.mlTable + MOVQ ctx+16(FP), CX + MOVQ 24(CX), CX + MOVQ (CX)(DI*8), DI + + // Update Literal Length State + BZHIQ SI, R14, CX + MOVQ $0x00001010, R13 + BEXTRQ R13, SI, SI + ADDQ CX, SI + + // Load ctx.llTable + MOVQ ctx+16(FP), CX + MOVQ (CX), CX + MOVQ (CX)(SI*8), SI + +sequenceDecs_decodeSync_safe_bmi2_skip_update: + // Adjust offset + MOVQ s+0(FP), CX + MOVQ 8(SP), R13 + CMPQ R12, $0x01 + JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 + MOVUPS 144(CX), X0 + MOVQ R13, 144(CX) + MOVUPS X0, 152(CX) + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: + CMPQ 24(SP), $0x00000000 + JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero + INCQ R13 + JMP sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: + TESTQ R13, R13 + JNZ 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero + MOVQ 144(CX), R13 + JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust + +sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: + MOVQ R13, R12 + XORQ R14, R14 + MOVQ $-1, R15 + CMPQ R13, $0x03 + CMOVQEQ R14, R12 + CMOVQEQ R15, R14 + ADDQ 144(CX)(R12*8), R14 + JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid + MOVQ $0x00000001, R14 + +sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: + CMPQ R13, $0x01 + JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip + MOVQ 152(CX), R12 + MOVQ R12, 160(CX) + +sequenceDecs_decodeSync_safe_bmi2_adjust_skip: + MOVQ 144(CX), R12 + MOVQ R12, 152(CX) + MOVQ R14, 144(CX) + MOVQ R14, R13 + +sequenceDecs_decodeSync_safe_bmi2_after_adjust: + MOVQ R13, 8(SP) + + // Check values + MOVQ 16(SP), CX + MOVQ 24(SP), R12 + LEAQ (CX)(R12*1), R14 + MOVQ s+0(FP), R15 + ADDQ R14, 256(R15) + MOVQ ctx+16(FP), R14 + SUBQ R12, 104(R14) + JS error_not_enough_literals + CMPQ CX, $0x00020002 + JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big + TESTQ R13, R13 + JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok + TESTQ CX, CX + JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch + +sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: + MOVQ 24(SP), CX + MOVQ 8(SP), R12 + MOVQ 16(SP), R13 + + // Check if we have enough space in s.out + LEAQ (CX)(R13*1), R14 + ADDQ R9, R14 + CMPQ R14, 32(SP) + JA error_not_enough_space + + // Copy literals + TESTQ CX, CX + JZ check_offset + MOVQ CX, R14 + SUBQ $0x10, R14 + JB copy_1_small + +copy_1_loop: + MOVUPS (R10), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R10 + ADDQ $0x10, R9 + SUBQ $0x10, R14 + JAE copy_1_loop + LEAQ 16(R10)(R14*1), R10 + LEAQ 16(R9)(R14*1), R9 + MOVUPS -16(R10), X0 + MOVUPS X0, -16(R9) + JMP copy_1_end + +copy_1_small: + CMPQ CX, $0x03 + JE copy_1_move_3 + JB copy_1_move_1or2 + CMPQ CX, $0x08 + JB copy_1_move_4through7 + JMP copy_1_move_8through16 + +copy_1_move_1or2: + MOVB (R10), R14 + MOVB -1(R10)(CX*1), R15 + MOVB R14, (R9) + MOVB R15, -1(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_3: + MOVW (R10), R14 + MOVB 2(R10), R15 + MOVW R14, (R9) + MOVB R15, 2(R9) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_4through7: + MOVL (R10), R14 + MOVL -4(R10)(CX*1), R15 + MOVL R14, (R9) + MOVL R15, -4(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + JMP copy_1_end + +copy_1_move_8through16: + MOVQ (R10), R14 + MOVQ -8(R10)(CX*1), R15 + MOVQ R14, (R9) + MOVQ R15, -8(R9)(CX*1) + ADDQ CX, R10 + ADDQ CX, R9 + +copy_1_end: + ADDQ CX, R11 + + // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) +check_offset: + MOVQ R11, CX + ADDQ 40(SP), CX + CMPQ R12, CX + JG error_match_off_too_big + CMPQ R12, 56(SP) + JG error_match_off_too_big + + // Copy match from history + MOVQ R12, CX + SUBQ R11, CX + JLS copy_match + MOVQ 48(SP), R14 + SUBQ CX, R14 + CMPQ R13, CX + JG copy_all_from_history + MOVQ R13, CX + SUBQ $0x10, CX + JB copy_4_small + +copy_4_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, CX + JAE copy_4_loop + LEAQ 16(R14)(CX*1), R14 + LEAQ 16(R9)(CX*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_4_end + +copy_4_small: + CMPQ R13, $0x03 + JE copy_4_move_3 + CMPQ R13, $0x08 + JB copy_4_move_4through7 + JMP copy_4_move_8through16 + +copy_4_move_3: + MOVW (R14), CX + MOVB 2(R14), R12 + MOVW CX, (R9) + MOVB R12, 2(R9) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_4through7: + MOVL (R14), CX + MOVL -4(R14)(R13*1), R12 + MOVL CX, (R9) + 
MOVL R12, -4(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + JMP copy_4_end + +copy_4_move_8through16: + MOVQ (R14), CX + MOVQ -8(R14)(R13*1), R12 + MOVQ CX, (R9) + MOVQ R12, -8(R9)(R13*1) + ADDQ R13, R14 + ADDQ R13, R9 + +copy_4_end: + ADDQ R13, R11 + JMP handle_loop + JMP loop_finished + +copy_all_from_history: + MOVQ CX, R15 + SUBQ $0x10, R15 + JB copy_5_small + +copy_5_loop: + MOVUPS (R14), X0 + MOVUPS X0, (R9) + ADDQ $0x10, R14 + ADDQ $0x10, R9 + SUBQ $0x10, R15 + JAE copy_5_loop + LEAQ 16(R14)(R15*1), R14 + LEAQ 16(R9)(R15*1), R9 + MOVUPS -16(R14), X0 + MOVUPS X0, -16(R9) + JMP copy_5_end + +copy_5_small: + CMPQ CX, $0x03 + JE copy_5_move_3 + JB copy_5_move_1or2 + CMPQ CX, $0x08 + JB copy_5_move_4through7 + JMP copy_5_move_8through16 + +copy_5_move_1or2: + MOVB (R14), R15 + MOVB -1(R14)(CX*1), BP + MOVB R15, (R9) + MOVB BP, -1(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_3: + MOVW (R14), R15 + MOVB 2(R14), BP + MOVW R15, (R9) + MOVB BP, 2(R9) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_4through7: + MOVL (R14), R15 + MOVL -4(R14)(CX*1), BP + MOVL R15, (R9) + MOVL BP, -4(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + JMP copy_5_end + +copy_5_move_8through16: + MOVQ (R14), R15 + MOVQ -8(R14)(CX*1), BP + MOVQ R15, (R9) + MOVQ BP, -8(R9)(CX*1) + ADDQ CX, R14 + ADDQ CX, R9 + +copy_5_end: + ADDQ CX, R11 + SUBQ CX, R13 + + // Copy match from the current buffer +copy_match: + MOVQ R9, CX + SUBQ R12, CX + + // ml <= mo + CMPQ R13, R12 + JA copy_overlapping_match + + // Copy non-overlapping match + ADDQ R13, R11 + MOVQ R13, R12 + SUBQ $0x10, R12 + JB copy_2_small + +copy_2_loop: + MOVUPS (CX), X0 + MOVUPS X0, (R9) + ADDQ $0x10, CX + ADDQ $0x10, R9 + SUBQ $0x10, R12 + JAE copy_2_loop + LEAQ 16(CX)(R12*1), CX + LEAQ 16(R9)(R12*1), R9 + MOVUPS -16(CX), X0 + MOVUPS X0, -16(R9) + JMP copy_2_end + +copy_2_small: + CMPQ R13, $0x03 + JE copy_2_move_3 + JB copy_2_move_1or2 + CMPQ R13, $0x08 + JB copy_2_move_4through7 + JMP copy_2_move_8through16 + +copy_2_move_1or2: + MOVB (CX), R12 + MOVB -1(CX)(R13*1), R14 + MOVB R12, (R9) + MOVB R14, -1(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_3: + MOVW (CX), R12 + MOVB 2(CX), R14 + MOVW R12, (R9) + MOVB R14, 2(R9) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_4through7: + MOVL (CX), R12 + MOVL -4(CX)(R13*1), R14 + MOVL R12, (R9) + MOVL R14, -4(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + JMP copy_2_end + +copy_2_move_8through16: + MOVQ (CX), R12 + MOVQ -8(CX)(R13*1), R14 + MOVQ R12, (R9) + MOVQ R14, -8(R9)(R13*1) + ADDQ R13, CX + ADDQ R13, R9 + +copy_2_end: + JMP handle_loop + + // Copy overlapping match +copy_overlapping_match: + ADDQ R13, R11 + +copy_slow_3: + MOVB (CX), R12 + MOVB R12, (R9) + INCQ CX + INCQ R9 + DECQ R13 + JNZ copy_slow_3 + +handle_loop: + MOVQ ctx+16(FP), CX + DECQ 96(CX) + JNS sequenceDecs_decodeSync_safe_bmi2_main_loop + +loop_finished: + MOVQ br+8(FP), CX + MOVQ AX, 32(CX) + MOVB DL, 40(CX) + MOVQ BX, 24(CX) + + // Update the context + MOVQ ctx+16(FP), AX + MOVQ R11, 136(AX) + MOVQ 144(AX), CX + SUBQ CX, R10 + MOVQ R10, 168(AX) + + // Return success + MOVQ $0x00000000, ret+24(FP) + RET + + // Return with match length error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: + MOVQ 16(SP), AX + MOVQ ctx+16(FP), CX + MOVQ AX, 216(CX) + MOVQ $0x00000001, ret+24(FP) + RET + + // Return with match too long error +sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: + MOVQ ctx+16(FP), AX + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ $0x00000002, 
ret+24(FP) + RET + + // Return with match offset too long error +error_match_off_too_big: + MOVQ ctx+16(FP), AX + MOVQ 8(SP), CX + MOVQ CX, 224(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000003, ret+24(FP) + RET + + // Return with not enough literals error +error_not_enough_literals: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ $0x00000004, ret+24(FP) + RET + + // Return with not enough output space error +error_not_enough_space: + MOVQ ctx+16(FP), AX + MOVQ 24(SP), CX + MOVQ CX, 208(AX) + MOVQ 16(SP), CX + MOVQ CX, 216(AX) + MOVQ R11, 136(AX) + MOVQ $0x00000005, ret+24(FP) + RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go new file mode 100644 index 00000000..ac2a80d2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -0,0 +1,237 @@ +//go:build !amd64 || appengine || !gc || noasm +// +build !amd64 appengine !gc noasm + +package zstd + +import ( + "fmt" + "io" +) + +// decode sequences from the stream with the provided history but without dictionary. +func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { + return false, nil +} + +// decode sequences from the stream without the provided history. +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. + llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + maxBlockSize := maxCompressedBlockSize + if s.windowSize < maxBlockSize { + maxBlockSize = s.windowSize + } + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
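+					// For example, with prevOffset = [8, 4, 2] (Rep1, Rep2, Rep3),
+					// a raw mo of 0 with ll == 0 becomes 1 below, which selects
+					// prevOffset[1] == 4, and the history shifts to [4, 8, 2].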
+ mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. + nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// executeSimple handles cases when a dictionary is not used. +func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Malformed input + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into the current block. 
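+			// v = seq.mo - t is how far the match starts before the current
+			// block, so the first v bytes are served from the history buffer
+			// and the remaining seq.ml - v bytes from the block being built.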
+ // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + + // We must be in the current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go new file mode 100644 index 00000000..8014174a --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -0,0 +1,114 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import "math/bits" + +type seqCoders struct { + llEnc, ofEnc, mlEnc *fseEncoder + llPrev, ofPrev, mlPrev *fseEncoder +} + +// swap coders with another (block). +func (s *seqCoders) swap(other *seqCoders) { + *s, *other = *other, *s +} + +// setPrev will update the previous encoders to the actually used ones +// and make sure a fresh one is in the main slot. +func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { + compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { + // We used the new one, more current to history and reuse the previous history + if *current == used { + *prev, *current = *current, *prev + c := *current + p := *prev + c.reUsed = false + p.reUsed = true + return + } + if used == *prev { + return + } + // Ensure we cannot reuse by accident + prevEnc := *prev + prevEnc.symbolLen = 0 + } + compareSwap(ll, &s.llEnc, &s.llPrev) + compareSwap(ml, &s.mlEnc, &s.mlPrev) + compareSwap(of, &s.ofEnc, &s.ofPrev) +} + +func highBit(val uint32) (n uint32) { + return uint32(bits.Len32(val) - 1) +} + +var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24} + +// Up to 6 bits +const maxLLCode = 35 + +// llBitsTable translates from ll code to number of bits. +var llBitsTable = [maxLLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 6, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16} + +// llCode returns the code that represents the literal length requested. 
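+// For example, litLength 100 is above 63, so the code is
+// highBit(100)+19 = 6+19 = 25, which carries 6 extra bits
+// (llBitsTable[25]).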
+func llCode(litLength uint32) uint8 { + const llDeltaCode = 19 + if litLength <= 63 { + // Compiler insists on bounds check (Go 1.12) + return llCodeTable[litLength&63] + } + return uint8(highBit(litLength)) + llDeltaCode +} + +var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} + +// Up to 6 bits +const maxMLCode = 52 + +// mlBitsTable translates from ml code to number of bits. +var mlBitsTable = [maxMLCode + 1]byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16} + +// note : mlBase = matchLength - MINMATCH; +// because it's the format it's stored in seqStore->sequences +func mlCode(mlBase uint32) uint8 { + const mlDeltaCode = 36 + if mlBase <= 127 { + // Compiler insists on bounds check (Go 1.12) + return mlCodeTable[mlBase&127] + } + return uint8(highBit(mlBase)) + mlDeltaCode +} + +func ofCode(offset uint32) uint8 { + // A valid offset will always be > 0. + return uint8(bits.Len32(offset) - 1) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go new file mode 100644 index 00000000..9e1baad7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -0,0 +1,435 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +import ( + "encoding/binary" + "errors" + "hash/crc32" + "io" + + "github.com/klauspost/compress/huff0" + snappy "github.com/klauspost/compress/internal/snapref" +) + +const ( + snappyTagLiteral = 0x00 + snappyTagCopy1 = 0x01 + snappyTagCopy2 = 0x02 + snappyTagCopy4 = 0x03 +) + +const ( + snappyChecksumSize = 4 + snappyMagicBody = "sNaPpY" + + // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + snappyMaxBlockSize = 65536 + + // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + snappyMaxEncodedLenOfMaxBlockSize = 76490 +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var ( + // ErrSnappyCorrupt reports that the input is invalid. + ErrSnappyCorrupt = errors.New("snappy: corrupt input") + // ErrSnappyTooLarge reports that the uncompressed length is too large. 
+	ErrSnappyTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrSnappyUnsupported reports that the input isn't supported.
+	ErrSnappyUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// SnappyConverter can read Snappy-compressed streams and convert them to zstd.
+// Conversion is done by converting the stream directly from Snappy without intermediate
+// full decoding.
+// Therefore the compression ratio is much lower than what a full decompression
+// and recompression could achieve, and a faulty Snappy stream may lead to a faulty
+// Zstandard stream without any errors being generated.
+// No CRC value is generated and not all CRC values of the Snappy stream are checked.
+// However, it provides really fast recompression of Snappy streams.
+// The converter can be reused to avoid allocations, even after errors.
+type SnappyConverter struct {
+	r     io.Reader
+	err   error
+	buf   []byte
+	block *blockEnc
+}
+
+// Convert the Snappy stream supplied in 'in' and write the Zstandard stream to 'w'.
+// If any error is detected on the Snappy stream it is returned.
+// The number of bytes written is returned.
+func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
+	initPredefined()
+	r.err = nil
+	r.r = in
+	if r.block == nil {
+		r.block = &blockEnc{}
+		r.block.init()
+	}
+	r.block.initNewEncode()
+	if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize {
+		r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize)
+	}
+	r.block.litEnc.Reuse = huff0.ReusePolicyNone
+	var written int64
+	var readHeader bool
+	{
+		var header []byte
+		var n int
+		header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+
+		n, r.err = w.Write(header)
+		if r.err != nil {
+			return written, r.err
+		}
+		written += int64(n)
+	}
+
+	for {
+		if !r.readFull(r.buf[:4], true) {
+			// Add empty last block
+			r.block.reset(nil)
+			r.block.last = true
+			err := r.block.encodeLits(r.block.literals, false)
+			if err != nil {
+				return written, err
+			}
+			n, err := w.Write(r.block.output)
+			if err != nil {
+				return written, err
+			}
+			written += int64(n)
+
+			return written, r.err
+		}
+		chunkType := r.buf[0]
+		if !readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				println("chunkType != chunkTypeStreamIdentifier", chunkType)
+				r.err = ErrSnappyCorrupt
+				return written, r.err
+			}
+			readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			println("chunkLen > len(r.buf)", chunkType)
+			r.err = ErrSnappyUnsupported
+			return written, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
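+			// A compressed-data chunk is a 4-byte masked CRC-32C of the
+			// uncompressed data followed by the Snappy-compressed payload
+			// (see the framing format document linked above).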
+ if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return written, r.err + } + //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[snappyChecksumSize:] + + n, hdr, err := snappyDecodedLen(buf) + if err != nil { + r.err = err + return written, r.err + } + buf = buf[hdr:] + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + r.block.pushOffsets() + if err := decodeSnappy(r.block, buf); err != nil { + r.err = err + return written, r.err + } + if r.block.size+r.block.extraLits != n { + printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) + r.err = ErrSnappyCorrupt + return written, r.err + } + err = r.block.encode(nil, false, false) + switch err { + case errIncompressible: + r.block.popOffsets() + r.block.reset(nil) + r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) + if err != nil { + return written, err + } + err = r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + case nil: + default: + return written, err + } + + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + case chunkTypeUncompressedData: + if debugEncoder { + println("Uncompressed, chunklen", chunkLen) + } + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < snappyChecksumSize { + println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.reset(nil) + buf := r.buf[:snappyChecksumSize] + if !r.readFull(buf, false) { + return written, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - snappyChecksumSize + if n > snappyMaxBlockSize { + println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) + r.err = ErrSnappyCorrupt + return written, r.err + } + r.block.literals = r.block.literals[:n] + if !r.readFull(r.block.literals, false) { + return written, r.err + } + if snappyCRC(r.block.literals) != checksum { + println("literals crc mismatch") + r.err = ErrSnappyCorrupt + return written, r.err + } + err := r.block.encodeLits(r.block.literals, false) + if err != nil { + return written, err + } + n, r.err = w.Write(r.block.output) + if r.err != nil { + return written, err + } + written += int64(n) + continue + + case chunkTypeStreamIdentifier: + if debugEncoder { + println("stream id", chunkLen, len(snappyMagicBody)) + } + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(snappyMagicBody) { + println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) + r.err = ErrSnappyCorrupt + return written, r.err + } + if !r.readFull(r.buf[:len(snappyMagicBody)], false) { + return written, r.err + } + for i := 0; i < len(snappyMagicBody); i++ { + if r.buf[i] != snappyMagicBody[i] { + println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) + r.err = ErrSnappyCorrupt + return written, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
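+			// The framing format requires decoders to report an error on
+			// reserved unskippable chunks rather than skip over them.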
+ println("chunkType <= 0x7f") + r.err = ErrSnappyUnsupported + return written, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return written, r.err + } + } +} + +// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read. +func decodeSnappy(blk *blockEnc, src []byte) error { + //decodeRef(make([]byte, snappyMaxBlockSize), src) + var s, length int + lits := blk.extraLits + var offset uint32 + for s < len(src) { + switch src[s] & 0x03 { + case snappyTagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, src) + return ErrSnappyCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + if x > snappyMaxBlockSize { + println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) + return ErrSnappyCorrupt + } + length = int(x) + 1 + if length <= 0 { + println("length <= 0 ", length) + + return errUnsupportedLiteralLength + } + //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { + // return ErrSnappyCorrupt + //} + + blk.literals = append(blk.literals, src[s:s+length]...) + //println(length, "litLen") + lits += length + s += length + continue + + case snappyTagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) + + case snappyTagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = uint32(src[s-2]) | uint32(src[s-1])<<8 + + case snappyTagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + println("uint(s) > uint(len(src)", s, len(src)) + return ErrSnappyCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + + if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { + println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) + + return ErrSnappyCorrupt + } + + // Check if offset is one of the recent offsets. + // Adjusts the output offset accordingly. + // Gives a tiny bit of compression, typically around 1%. 
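+		// In this encoder's sequence representation, offset values 1-3
+		// denote the three repeat offsets, so an absolute offset is
+		// stored as offset+3.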
+ if false { + offset = blk.matchOffset(offset, uint32(lits)) + } else { + offset += 3 + } + + blk.sequences = append(blk.sequences, seq{ + litLen: uint32(lits), + offset: offset, + matchLen: uint32(length) - zstdMinMatch, + }) + blk.size += length + lits + lits = 0 + } + blk.extraLits = lits + return nil +} + +func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrSnappyCorrupt + } + return false + } + return true +} + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func snappyCRC(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return c>>15 | c<<17 + 0xa282ead8 +} + +// snappyDecodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrSnappyCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrSnappyTooLarge + } + return int(v), n, nil +} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go new file mode 100644 index 00000000..29c15c8c --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -0,0 +1,141 @@ +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +package zstd + +import ( + "errors" + "io" + "sync" +) + +// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. +// See https://www.winzip.com/win/en/comp_info.html +const ZipMethodWinZip = 93 + +// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. +// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. +// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT +const ZipMethodPKWare = 20 + +// zipReaderPool is the default reader pool. +var zipReaderPool = sync.Pool{New: func() interface{} { + z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) + if err != nil { + panic(err) + } + return z +}} + +// newZipReader creates a pooled zip decompressor. +func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { + pool := &zipReaderPool + if len(opts) > 0 { + opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) + // Force concurrency 1 + opts = append(opts, WithDecoderConcurrency(1)) + // Create our own pool + pool = &sync.Pool{} + } + return func(r io.Reader) io.ReadCloser { + dec, ok := pool.Get().(*Decoder) + if ok { + dec.Reset(r) + } else { + d, err := NewReader(r, opts...) 
+ if err != nil { + panic(err) + } + dec = d + } + return &pooledZipReader{dec: dec, pool: pool} + } +} + +type pooledZipReader struct { + mu sync.Mutex // guards Close and Read + pool *sync.Pool + dec *Decoder +} + +func (r *pooledZipReader) Read(p []byte) (n int, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.dec == nil { + return 0, errors.New("read after close or EOF") + } + dec, err := r.dec.Read(p) + if err == io.EOF { + r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return dec, err +} + +func (r *pooledZipReader) Close() error { + r.mu.Lock() + defer r.mu.Unlock() + var err error + if r.dec != nil { + err = r.dec.Reset(nil) + r.pool.Put(r.dec) + r.dec = nil + } + return err +} + +type pooledZipWriter struct { + mu sync.Mutex // guards Close and Read + enc *Encoder + pool *sync.Pool +} + +func (w *pooledZipWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + if w.enc == nil { + return 0, errors.New("Write after Close") + } + return w.enc.Write(p) +} + +func (w *pooledZipWriter) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + var err error + if w.enc != nil { + err = w.enc.Close() + w.pool.Put(w.enc) + w.enc = nil + } + return err +} + +// ZipCompressor returns a compressor that can be registered with zip libraries. +// The provided encoder options will be used on all encodes. +func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { + var pool sync.Pool + return func(w io.Writer) (io.WriteCloser, error) { + enc, ok := pool.Get().(*Encoder) + if ok { + enc.Reset(w) + } else { + var err error + enc, err = NewWriter(w, opts...) + if err != nil { + return nil, err + } + } + return &pooledZipWriter{enc: enc, pool: &pool}, nil + } +} + +// ZipDecompressor returns a decompressor that can be registered with zip libraries. +// See ZipCompressor for example. +// Options can be specified. WithDecoderConcurrency(1) is forced, +// and by default a 128MB maximum decompression window is specified. +// The window size can be overridden if required. +func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { + return newZipReader(opts...) +} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go new file mode 100644 index 00000000..3eb3f1c8 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -0,0 +1,152 @@ +// Package zstd provides decompression of zstandard files. +// +// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd +package zstd + +import ( + "bytes" + "encoding/binary" + "errors" + "log" + "math" + "math/bits" +) + +// enable debug printing +const debug = false + +// enable encoding debug printing +const debugEncoder = debug + +// enable decoding debug printing +const debugDecoder = debug + +// Enable extra assertions. +const debugAsserts = debug || false + +// print sequence details +const debugSequences = false + +// print detailed matching information +const debugMatches = false + +// force encoder to use predefined tables. +const forcePreDef = false + +// zstdMinMatch is the minimum zstd match length. +const zstdMinMatch = 3 + +// Reset the buffer offset when reaching this. +const bufferReset = math.MaxInt32 - MaxWindowSize + +// fcsUnknown is used for unknown frame content size. +const fcsUnknown = math.MaxUint64 + +var ( + // ErrReservedBlockType is returned when a reserved block type is found. + // Typically this indicates wrong or corrupted input. 
+	ErrReservedBlockType = errors.New("invalid input: reserved block type encountered")
+
+	// ErrCompressedSizeTooBig is returned when a block is bigger than allowed.
+	// Typically this indicates wrong or corrupted input.
+	ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big")
+
+	// ErrBlockTooSmall is returned when a block is too small to be decoded.
+	// Typically returned on invalid input.
+	ErrBlockTooSmall = errors.New("block too small")
+
+	// ErrUnexpectedBlockSize is returned when a block has unexpected size.
+	// Typically returned on invalid input.
+	ErrUnexpectedBlockSize = errors.New("unexpected block size")
+
+	// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
+	// Typically this indicates wrong or corrupted input.
+	ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
+
+	// ErrWindowSizeExceeded is returned when a reference exceeds the valid window size.
+	// Typically this indicates wrong or corrupted input.
+	ErrWindowSizeExceeded = errors.New("window size exceeded")
+
+	// ErrWindowSizeTooSmall is returned when no window size is specified.
+	// Typically this indicates wrong or corrupted input.
+	ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small")
+
+	// ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit.
+	ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
+
+	// ErrUnknownDictionary is returned if the dictionary ID is unknown.
+	// For the time being dictionaries are not supported.
+	ErrUnknownDictionary = errors.New("unknown dictionary")
+
+	// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
+	// This is only returned if SingleSegment is specified on the frame.
+	ErrFrameSizeExceeded = errors.New("frame size exceeded")
+
+	// ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size.
+	// This is only returned if SingleSegment is specified on the frame.
+	ErrFrameSizeMismatch = errors.New("frame size does not match size on stream")
+
+	// ErrCRCMismatch is returned if the CRC check fails.
+	ErrCRCMismatch = errors.New("CRC check failed")
+
+	// ErrDecoderClosed will be returned if the Decoder was used after
+	// Close has been called.
+	ErrDecoderClosed = errors.New("decoder used after Close")
+
+	// ErrDecoderNilInput is returned when a nil Reader was provided
+	// and an operation other than Reset/DecodeAll/Close was attempted.
+	ErrDecoderNilInput = errors.New("nil input provided as reader")
+)
+
+func println(a ...interface{}) {
+	if debug || debugDecoder || debugEncoder {
+		log.Println(a...)
+	}
+}
+
+func printf(format string, a ...interface{}) {
+	if debug || debugDecoder || debugEncoder {
+		log.Printf(format, a...)
+	}
+}
+
+// matchLen returns the number of leading bytes that match in a and b.
+// a must be the shorter of the two slices.
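+// For example, matchLen([]byte("zstd1"), []byte("zstd2x")) returns 4.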
+func matchLen(a, b []byte) int { + b = b[:len(a)] + for i := 0; i < len(a)-7; i += 8 { + if diff := load64(a, i) ^ load64(b, i); diff != 0 { + return i + (bits.TrailingZeros64(diff) >> 3) + } + } + + checked := (len(a) >> 3) << 3 + a = a[checked:] + b = b[checked:] + for i := range a { + if a[i] != b[i] { + return i + checked + } + } + return len(a) + checked +} + +func load3232(b []byte, i int32) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +func load6432(b []byte, i int32) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +func load64(b []byte, i int) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +type byter interface { + Bytes() []byte + Len() int +} + +var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 00000000..d70706d5 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 00000000..25378537 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,167 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. 
+var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +// Reset clears the cache, forcing the next call to Dir to re-detect +// the home directory. This generally never has to be called, but can be +// useful in tests if you're modifying the home directory via the HOME +// env var or something. +func Reset() { + cacheLock.Lock() + defer cacheLock.Unlock() + homedirCache = "" +} + +func dirUnix() (string, error) { + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, env vars are lowercase. + homeEnv = "home" + } + + // First prefer the HOME environmental variable + if home := os.Getenv(homeEnv); home != "" { + return home, nil + } + + var stdout bytes.Buffer + + // If that fails, try OS specific commands + if runtime.GOOS == "darwin" { + cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result, nil + } + } + } else { + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
+ if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap new file mode 100644 index 00000000..eaf8b2f9 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.mailmap @@ -0,0 +1,4 @@ +Aaron Lehmann +Derek McGowan +Stephen J Day +Haibing Zhou diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml new file mode 100644 index 00000000..b6165f83 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml @@ -0,0 +1,28 @@ +version: 2 + +requirements: + signed_off_by: + required: true + +always_pending: + title_regex: '^WIP' + explanation: 'Work in progress...' + +group_defaults: + required: 2 + approve_by_comment: + enabled: true + approve_regex: '^LGTM' + reject_regex: '^Rejected' + reset_on_push: + enabled: true + author_approval: + ignored: true + conditions: + branches: + - master + +groups: + go-digest: + teams: + - go-digest-maintainers diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml new file mode 100644 index 00000000..5775f885 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.12.x + - 1.13.x + - master diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md new file mode 100644 index 00000000..e4d962ac --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md @@ -0,0 +1,72 @@ +# Contributing to Docker open source projects + +Want to hack on this project? Awesome! Here are instructions to get you started. + +This project is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). 
+ +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE b/vendor/github.com/opencontainers/go-digest/LICENSE new file mode 100644 index 00000000..3ac8ab64 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2019, 2020 OCI Contributors + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs new file mode 100644 index 00000000..e26cd4fc --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. 
Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. 
For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. 
For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. 
a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. 
TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. 
+Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS new file mode 100644 index 00000000..843b1b20 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/MAINTAINERS @@ -0,0 +1,5 @@ +Derek McGowan (@dmcgowan) +Stephen Day (@stevvooe) +Vincent Batts (@vbatts) +Akihiro Suda (@AkihiroSuda) +Sebastiaan van Stijn (@thaJeztah) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md new file mode 100644 index 00000000..a1128720 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/README.md @@ -0,0 +1,96 @@ +# go-digest + +[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) + +Common digest package used across the container ecosystem. + +Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. + +# What is a digest? + +A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function). + +The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems: + +```go +id := digest.FromBytes([]byte("my content")) +``` + +In the example above, the id can be used to uniquely identify the byte slice "my content". +This allows two disparate applications to agree on a verifiable identifier without having to trust one another. + +An identifying digest can be verified, as follows: + +```go +if id != digest.FromBytes([]byte("my content")) { + return errors.New("the content has changed!") +} +``` + +A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense: + +```go +rd := getContent() +verifier := id.Verifier() +io.Copy(verifier, rd) + +if !verifier.Verified() { + return errors.New("the content has changed!") +} +``` + +Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system. + +# Usage + +While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package. + +1. Make sure to import the hash implementations into your application or the package will panic. 
+   You should have something like the following in the main (or other entrypoint) of your application:
+
+   ```go
+   import (
+        _ "crypto/sha256"
+        _ "crypto/sha512"
+   )
+   ```
+   This may seem inconvenient but it allows you to replace the hash
+   implementations with others, such as https://github.com/stevvooe/resumable.
+
+2. Even though `digest.Digest` may be assembled as a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input.
+   While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application.
+
+3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests.
+
+# Stability
+
+The Go API, at this stage, is considered stable, unless otherwise noted.
+
+As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest).
+
+# Contributing
+
+This package is considered fairly complete.
+It has been in production in thousands (millions?) of deployments and is fairly battle-hardened.
+New additions will be met with skepticism.
+If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR.
+
+## Code of Conduct
+
+Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct].
+
+## Security
+
+If you find an issue, please follow the [security][security] protocol to report it.
+
+# Copyright and license
+
+Copyright © 2019, 2020 OCI Contributors
+Copyright © 2016 Docker, Inc.
+All rights reserved, except as follows.
+Code is released under the [Apache 2.0 license](LICENSE).
+This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs).
+You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+
+[security]: https://github.com/opencontainers/org/blob/master/security
+[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md
diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go
new file mode 100644
index 00000000..490951dc
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/algorithm.go
@@ -0,0 +1,193 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+	"crypto"
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
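+//
+// A hedged usage sketch (editorial, not upstream documentation), assuming a
+// client has imported crypto/sha256 as described in the README:
+//
+//	alg := digest.SHA256
+//	if alg.Available() {
+//		fmt.Println(alg.FromString("my content")) // "sha256:" followed by 64 hex chars
+//	}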
+type Algorithm string
+
+// supported digest types
+const (
+	SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
+	SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
+	SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
+
+	// Canonical is the primary digest algorithm used with the distribution
+	// project. Other digests may be used but this one is the primary storage
+	// digest.
+	Canonical = SHA256
+)
+
+var (
+	// TODO(stevvooe): Follow the pattern of the standard crypto package for
+	// registration of digests. Effectively, we are a registerable set and
+	// common symbol access.
+
+	// algorithms maps values to hash.Hash implementations. Other algorithms
+	// may be available but they cannot be calculated by the digest package.
+	algorithms = map[Algorithm]crypto.Hash{
+		SHA256: crypto.SHA256,
+		SHA384: crypto.SHA384,
+		SHA512: crypto.SHA512,
+	}
+
+	// anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
+	// Note that /A-F/ is disallowed.
+	anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
+		SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
+		SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
+		SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
+	}
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, Digester and Hash will return nil.
+func (a Algorithm) Available() bool {
+	h, ok := algorithms[a]
+	if !ok {
+		return false
+	}
+
+	// check availability of the hash, as well
+	return h.Available()
+}
+
+func (a Algorithm) String() string {
+	return string(a)
+}
+
+// Size returns number of bytes returned by the hash.
+func (a Algorithm) Size() int {
+	h, ok := algorithms[a]
+	if !ok {
+		return 0
+	}
+	return h.Size()
+}
+
+// Set is implemented to allow use of Algorithm as a command line flag.
+func (a *Algorithm) Set(value string) error {
+	if value == "" {
+		*a = Canonical
+	} else {
+		// just do a type conversion, support is queried with Available.
+		*a = Algorithm(value)
+	}
+
+	if !a.Available() {
+		return ErrDigestUnsupported
+	}
+
+	return nil
+}
+
+// Digester returns a new digester for the specified algorithm. If the algorithm
+// does not have a digester implementation, nil will be returned. This can be
+// checked by calling Available before calling Digester.
+func (a Algorithm) Digester() Digester {
+	return &digester{
+		alg:  a,
+		hash: a.Hash(),
+	}
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
+func (a Algorithm) Hash() hash.Hash {
+	if !a.Available() {
+		// Empty algorithm string is invalid
+		if a == "" {
+			panic("empty digest algorithm, validate before calling Algorithm.Hash()")
+		}
+
+		// NOTE(stevvooe): A missing hash is usually a programming error that
+		// must be resolved at compile time. We don't import in the digest
+		// package to allow users to choose their hash implementation (such as
+		// when using stevvooe/resumable or a hardware accelerated package).
+		//
+		// Applications that may want to resolve the hash at runtime should
+		// call Algorithm.Available before calling Algorithm.Hash().
+		panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
+	}
+
+	return algorithms[a].New()
+}
+
+// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
+// the encoded portion of the digest.
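+//
+// (Editorial note, not upstream documentation: NewDigestFromBytes(alg, p)
+// in digest.go is defined as NewDigestFromEncoded(alg, alg.Encode(p)), so
+// Encode determines the text that appears after the ":" in a Digest.)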
+func (a Algorithm) Encode(d []byte) string {
+	// TODO(stevvooe): Currently, all algorithms use a hex encoding. When we
+	// add support for back registration, we can modify this accordingly.
+	return fmt.Sprintf("%x", d)
+}
+
+// FromReader returns the digest of the reader using the algorithm.
+func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
+	digester := a.Digester()
+
+	if _, err := io.Copy(digester.Hash(), rd); err != nil {
+		return "", err
+	}
+
+	return digester.Digest(), nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+	digester := a.Digester()
+
+	if _, err := digester.Hash().Write(p); err != nil {
+		// Writes to a Hash should never fail. None of the existing
+		// hash implementations in the stdlib or hashes vendored
+		// here can return errors from Write. Having a panic in this
+		// condition instead of having FromBytes return an error value
+		// avoids unnecessary error handling paths in all callers.
+		panic("write to hash function returned error: " + err.Error())
+	}
+
+	return digester.Digest()
+}
+
+// FromString digests the string input and returns a Digest.
+func (a Algorithm) FromString(s string) Digest {
+	return a.FromBytes([]byte(s))
+}
+
+// Validate validates the encoded portion string.
+func (a Algorithm) Validate(encoded string) error {
+	r, ok := anchoredEncodedRegexps[a]
+	if !ok {
+		return ErrDigestUnsupported
+	}
+	// Digests must always be hex-encoded, ensuring that their hex portion will
+	// always be size*2.
+	if a.Size()*2 != len(encoded) {
+		return ErrDigestInvalidLength
+	}
+	if r.MatchString(encoded) {
+		return nil
+	}
+	return ErrDigestInvalidFormat
+}
diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go
new file mode 100644
index 00000000..518b5e71
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/digest.go
@@ -0,0 +1,157 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// Digest allows simple protection of hex formatted digest strings, prefixed
+// by their algorithm. Strings of type Digest have some guarantee of being in
+// the correct format and it provides quick access to the components of a
+// digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This allows one to abstract the digest behind this type and work only in
+// those terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+	return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions.
This is also useful for rebuilding digests from binary +// serializations. +func NewDigestFromBytes(alg Algorithm, p []byte) Digest { + return NewDigestFromEncoded(alg, alg.Encode(p)) +} + +// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded. +func NewDigestFromHex(alg, hex string) Digest { + return NewDigestFromEncoded(Algorithm(alg), hex) +} + +// NewDigestFromEncoded returns a Digest from alg and the encoded digest. +func NewDigestFromEncoded(alg Algorithm, encoded string) Digest { + return Digest(fmt.Sprintf("%s:%s", alg, encoded)) +} + +// DigestRegexp matches valid digest types. +var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`) + +// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. +var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) + +var ( + // ErrDigestInvalidFormat returned when digest format invalid. + ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") + + // ErrDigestInvalidLength returned when digest has invalid length. + ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") + + // ErrDigestUnsupported returned when the digest algorithm is unsupported. + ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") +) + +// Parse parses s and returns the validated digest object. An error will +// be returned if the format is invalid. +func Parse(s string) (Digest, error) { + d := Digest(s) + return d, d.Validate() +} + +// FromReader consumes the content of rd until io.EOF, returning canonical digest. +func FromReader(rd io.Reader) (Digest, error) { + return Canonical.FromReader(rd) +} + +// FromBytes digests the input and returns a Digest. +func FromBytes(p []byte) Digest { + return Canonical.FromBytes(p) +} + +// FromString digests the input and returns a Digest. +func FromString(s string) Digest { + return Canonical.FromString(s) +} + +// Validate checks that the contents of d is a valid digest, returning an +// error if not. +func (d Digest) Validate() error { + s := string(d) + i := strings.Index(s, ":") + if i <= 0 || i+1 == len(s) { + return ErrDigestInvalidFormat + } + algorithm, encoded := Algorithm(s[:i]), s[i+1:] + if !algorithm.Available() { + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat + } + return ErrDigestUnsupported + } + return algorithm.Validate(encoded) +} + +// Algorithm returns the algorithm portion of the digest. This will panic if +// the underlying digest is not in a valid format. +func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) +} + +// Verifier returns a writer object that can be used to verify a stream of +// content against the digest. If the digest is invalid, the method will panic. +func (d Digest) Verifier() Verifier { + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + } +} + +// Encoded returns the encoded portion of the digest. This will panic if the +// underlying digest is not in a valid format. +func (d Digest) Encoded() string { + return string(d[d.sepIndex()+1:]) +} + +// Hex is deprecated. Please use Digest.Encoded. 
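+// Hex forwards to Encoded and is retained only for backward compatibility
+// (an editorial note; the forwarding is visible in the body below).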
+func (d Digest) Hex() string {
+	return d.Encoded()
+}
+
+func (d Digest) String() string {
+	return string(d)
+}
+
+func (d Digest) sepIndex() int {
+	i := strings.Index(string(d), ":")
+
+	if i < 0 {
+		panic(fmt.Sprintf("no ':' separator in digest %q", d))
+	}
+
+	return i
+}
diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go
new file mode 100644
index 00000000..ede90775
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/digester.go
@@ -0,0 +1,40 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import "hash"
+
+// Digester calculates the digest of written data. Writes should go directly
+// to the return value of Hash, while calling Digest will return the current
+// value of the digest.
+type Digester interface {
+	Hash() hash.Hash // provides direct access to underlying hash instance.
+	Digest() Digest
+}
+
+// digester provides a simple digester definition that embeds a hasher.
+type digester struct {
+	alg  Algorithm
+	hash hash.Hash
+}
+
+func (d *digester) Hash() hash.Hash {
+	return d.hash
+}
+
+func (d *digester) Digest() Digest {
+	return NewDigest(d.alg, d.hash)
+}
diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go
new file mode 100644
index 00000000..83d3a936
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/doc.go
@@ -0,0 +1,62 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package digest provides a generalized type to opaquely represent message
+// digests and their operations within the registry. The Digest type is
+// designed to serve as a flexible identifier in a content-addressable system.
+// More importantly, it provides tools and wrappers to work with
+// hash.Hash-based digests with little effort.
+//
+// Basics
+//
+// The format of a digest is simply a string with two parts, dubbed the
+// "algorithm" and the "digest", separated by a colon:
+//
+//	<algorithm>:<digest>
+//
+// An example of a sha256 digest representation follows:
+//
+//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// The "algorithm" portion defines both the hashing algorithm used to calculate
+// the digest and the encoding of the resulting digest, which defaults to "hex"
+// if not otherwise specified.
Currently, all supported algorithms have their +// digests encoded in hex strings. +// +// In the example above, the string "sha256" is the algorithm and the hex bytes +// are the "digest". +// +// Because the Digest type is simply a string, once a valid Digest is +// obtained, comparisons are cheap, quick and simple to express with the +// standard equality operator. +// +// Verification +// +// The main benefit of using the Digest type is simple verification against a +// given digest. The Verifier interface, modeled after the stdlib hash.Hash +// interface, provides a common write sink for digest verification. After +// writing is complete, calling the Verifier.Verified method will indicate +// whether or not the stream of bytes matches the target digest. +// +// Missing Features +// +// In addition to the above, we intend to add the following features to this +// package: +// +// 1. A Digester type that supports write sink digest calculation. +// +// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. +// +package digest diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go new file mode 100644 index 00000000..afef506f --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/verifiers.go @@ -0,0 +1,46 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import ( + "hash" + "io" +) + +// Verifier presents a general verification interface to be used with message +// digests and other byte stream verifications. Users instantiate a Verifier +// from one of the various methods, write the data under test to it then check +// the result with the Verified method. +type Verifier interface { + io.Writer + + // Verified will return true if the content written to Verifier matches + // the digest. + Verified() bool +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE new file mode 100644 index 00000000..9fdc20fd --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go new file mode 100644 index 00000000..6f9e6fd3 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. + AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. + AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. + // The version MAY match a label or tag in the source code repository. + // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. + AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. + AnnotationLicenses = "org.opencontainers.image.licenses" + + // AnnotationRefName is the annotation key for the name of the reference for a target. + // SHOULD only be considered valid when on descriptors on `index.json` within image layout. + AnnotationRefName = "org.opencontainers.image.ref.name" + + // AnnotationTitle is the annotation key for the human-readable title of the image. + AnnotationTitle = "org.opencontainers.image.title" + + // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. + AnnotationDescription = "org.opencontainers.image.description" + + // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. + AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" + + // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. + AnnotationBaseImageName = "org.opencontainers.image.base.name" + + // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. + AnnotationArtifactCreated = "org.opencontainers.artifact.created" + + // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. 
+ AnnotationArtifactDescription = "org.opencontainers.artifact.description" + + // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. + AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" +) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go new file mode 100644 index 00000000..03d76ce4 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go @@ -0,0 +1,34 @@ +// Copyright 2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +// Artifact describes an artifact manifest. +// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON. +type Artifact struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType"` + + // ArtifactType is the IANA media type of the artifact this schema refers to. + ArtifactType string `json:"artifactType"` + + // Blobs is a collection of blobs referenced by this manifest. + Blobs []Descriptor `json:"blobs,omitempty"` + + // Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the artifact manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go new file mode 100644 index 00000000..ffff4b6d --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -0,0 +1,114 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "time" + + digest "github.com/opencontainers/go-digest" +) + +// ImageConfig defines the execution parameters which should be used as a base when running a container using an image. +type ImageConfig struct { + // User defines the username or UID which the process in the container should run as. + User string `json:"User,omitempty"` + + // ExposedPorts a set of ports to expose from a container running this image. 
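+	// (Editorial note: per the OCI image specification, keys are expected
+	// in the form "port/tcp", "port/udp", or "port", with tcp assumed when
+	// no protocol is given; the empty struct values carry no information.)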
+	// ExposedPorts is a set of ports to expose from a container running this image.
+	ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
+
+	// Env is a list of environment variables to be used in a container.
+	Env []string `json:"Env,omitempty"`
+
+	// Entrypoint defines a list of arguments to use as the command to execute when the container starts.
+	Entrypoint []string `json:"Entrypoint,omitempty"`
+
+	// Cmd defines the default arguments to the entrypoint of the container.
+	Cmd []string `json:"Cmd,omitempty"`
+
+	// Volumes is a set of directories describing where the process is likely to write data specific to a container instance.
+	Volumes map[string]struct{} `json:"Volumes,omitempty"`
+
+	// WorkingDir sets the current working directory of the entrypoint process in the container.
+	WorkingDir string `json:"WorkingDir,omitempty"`
+
+	// Labels contains arbitrary metadata for the container.
+	Labels map[string]string `json:"Labels,omitempty"`
+
+	// StopSignal contains the system call signal that will be sent to the container to exit.
+	StopSignal string `json:"StopSignal,omitempty"`
+}
+
+// RootFS describes the layer content addresses used by the image.
+type RootFS struct {
+	// Type is the type of the rootfs.
+	Type string `json:"type"`
+
+	// DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
+	DiffIDs []digest.Digest `json:"diff_ids"`
+}
+
+// History describes the history of a layer.
+type History struct {
+	// Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6.
+	Created *time.Time `json:"created,omitempty"`
+
+	// CreatedBy is the command which created the layer.
+	CreatedBy string `json:"created_by,omitempty"`
+
+	// Author is the author of the build point.
+	Author string `json:"author,omitempty"`
+
+	// Comment is a custom message set when creating the layer.
+	Comment string `json:"comment,omitempty"`
+
+	// EmptyLayer is used to mark if the history item created a filesystem diff.
+	EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+// Image is the JSON structure which describes some basic information about the image.
+// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
+type Image struct {
+	// Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6.
+	Created *time.Time `json:"created,omitempty"`
+
+	// Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
+	Author string `json:"author,omitempty"`
+
+	// Architecture is the CPU architecture which the binaries in this image are built to run on.
+	Architecture string `json:"architecture"`
+
+	// Variant is the variant of the specified CPU architecture which image binaries are intended to run on.
+	Variant string `json:"variant,omitempty"`
+
+	// OS is the name of the operating system which the image is built to run on.
+	OS string `json:"os"`
+
+	// OSVersion is an optional field specifying the operating system
+	// version, for example on Windows `10.0.14393.1066`.
+	OSVersion string `json:"os.version,omitempty"`
+
+	// OSFeatures is an optional field specifying an array of strings,
+	// each listing a required OS feature (for example on Windows `win32k`).
+	OSFeatures []string `json:"os.features,omitempty"`
+
+	// Config defines the execution parameters which should be used as a base when running a container using the image.
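+	//
+	// For illustration, a minimal image config might be assembled like this
+	// (the entrypoint path and the diffIDs variable are hypothetical):
+	//
+	//	img := Image{
+	//		Architecture: "amd64",
+	//		OS:           "linux",
+	//		Config: ImageConfig{
+	//			Env:        []string{"PATH=/usr/local/bin:/usr/bin:/bin"},
+	//			Entrypoint: []string{"/app/server"},
+	//		},
+	//		RootFS: RootFS{Type: "layers", DiffIDs: diffIDs},
+	//	}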
+ Config ImageConfig `json:"config,omitempty"` + + // RootFS references the layer content addresses used by the image. + RootFS RootFS `json:"rootfs"` + + // History describes the history of each layer. + History []History `json:"history,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go new file mode 100644 index 00000000..9654aa5a --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -0,0 +1,72 @@ +// Copyright 2016-2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import digest "github.com/opencontainers/go-digest" + +// Descriptor describes the disposition of targeted content. +// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype +// when marshalled to JSON. +type Descriptor struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType,omitempty"` + + // Digest is the digest of the targeted content. + Digest digest.Digest `json:"digest"` + + // Size specifies the size in bytes of the blob. + Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Data is an embedding of the targeted content. This is encoded as a base64 + // string when marshalled to JSON (automatically, by encoding/json). If + // present, Data can be used directly to avoid fetching the targeted content. + Data []byte `json:"data,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // + // This should only be used when referring to a manifest. + Platform *Platform `json:"platform,omitempty"` + + // ArtifactType is the IANA media type of this artifact. + ArtifactType string `json:"artifactType,omitempty"` +} + +// Platform describes the platform which the image in the manifest runs on. +type Platform struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. + Architecture string `json:"architecture"` + + // OS specifies the operating system, for example `linux` or `windows`. + OS string `json:"os"` + + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + + // Variant is an optional field specifying a variant of the CPU, for + // example `v7` to specify ARMv7 when architecture is `arm`. 
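+	//
+	// For illustration, an index entry for 32-bit ARMv7 might look like this
+	// (digest and size variables are hypothetical placeholders):
+	//
+	//	desc := Descriptor{
+	//		MediaType: MediaTypeImageManifest,
+	//		Digest:    manifestDigest,
+	//		Size:      manifestSize,
+	//		Platform:  &Platform{Architecture: "arm", OS: "linux", Variant: "v7"},
+	//	}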
+ Variant string `json:"variant,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go new file mode 100644 index 00000000..ed4a56e5 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -0,0 +1,32 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Index references manifests for various platforms. +// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON. +type Index struct { + specs.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // Manifests references platform specific manifests. + Manifests []Descriptor `json:"manifests"` + + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go new file mode 100644 index 00000000..fc79e9e0 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -0,0 +1,28 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // ImageLayoutFile is the file name of oci image layout file + ImageLayoutFile = "oci-layout" + // ImageLayoutVersion is the version of ImageLayout + ImageLayoutVersion = "1.0.0" +) + +// ImageLayout is the structure in the "oci-layout" file, found in the root +// of an OCI Image-layout directory. +type ImageLayout struct { + Version string `json:"imageLayoutVersion"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go new file mode 100644 index 00000000..730a0935 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -0,0 +1,38 @@ +// Copyright 2016-2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import "github.com/opencontainers/image-spec/specs-go" + +// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON. +type Manifest struct { + specs.Versioned + + // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json` + MediaType string `json:"mediaType,omitempty"` + + // Config references a configuration object for a container, by digest. + // The referenced configuration object is a JSON blob that the runtime uses to set up the container. + Config Descriptor `json:"config"` + + // Layers is an indexed list of layers referenced by the manifest. + Layers []Descriptor `json:"layers"` + + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the image manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go new file mode 100644 index 00000000..935b481e --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -0,0 +1,60 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // MediaTypeDescriptor specifies the media type for a content descriptor. + MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json" + + // MediaTypeLayoutHeader specifies the media type for the oci-layout. + MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json" + + // MediaTypeImageManifest specifies the media type for an image manifest. + MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json" + + // MediaTypeImageIndex specifies the media type for an image index. + MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json" + + // MediaTypeImageLayer is the media type used for layers referenced by the manifest. + MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar" + + // MediaTypeImageLayerGzip is the media type used for gzipped layers + // referenced by the manifest. + MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + + // MediaTypeImageLayerZstd is the media type used for zstd compressed + // layers referenced by the manifest. 
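+	//
+	// For illustration, a manifest ties several of these media types
+	// together (digest and size variables are hypothetical):
+	//
+	//	m := Manifest{
+	//		Versioned: specs.Versioned{SchemaVersion: 2},
+	//		MediaType: MediaTypeImageManifest,
+	//		Config:    Descriptor{MediaType: MediaTypeImageConfig, Digest: cfgDigest, Size: cfgSize},
+	//		Layers:    []Descriptor{{MediaType: MediaTypeImageLayerGzip, Digest: layerDigest, Size: layerSize}},
+	//	}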
+	MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd"
+
+	// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
+	// the manifest but with distribution restrictions.
+	MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
+
+	// MediaTypeImageLayerNonDistributableGzip is the media type for
+	// gzipped layers referenced by the manifest but with distribution
+	// restrictions.
+	MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
+
+	// MediaTypeImageLayerNonDistributableZstd is the media type for zstd
+	// compressed layers referenced by the manifest but with distribution
+	// restrictions.
+	MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd"
+
+	// MediaTypeImageConfig specifies the media type for the image configuration.
+	MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
+
+	// MediaTypeArtifactManifest specifies the media type for an artifact manifest.
+	MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json"
+)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
new file mode 100644
index 00000000..d2790357
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package specs
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes
+	VersionMajor = 1
+	// VersionMinor is for functionality added in a backwards-compatible manner
+	VersionMinor = 1
+	// VersionPatch is for backwards-compatible bug fixes
+	VersionPatch = 0
+
+	// VersionDev indicates the development branch. Releases will be an empty string.
+	VersionDev = "-rc2"
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
new file mode 100644
index 00000000..58a1510f
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
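+
+// For illustration, the constants in version.go above collapse into a single
+// version string that callers can log or compare:
+//
+//	fmt.Println(specs.Version) // prints "1.1.0-rc2"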
+ +package specs + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} diff --git a/vendor/github.com/vbatts/tar-split/LICENSE b/vendor/github.com/vbatts/tar-split/LICENSE new file mode 100644 index 00000000..ca03685b --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2015 Vincent Batts, Raleigh, NC, USA + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors +may be used to endorse or promote products derived from this software without +specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go new file mode 100644 index 00000000..dee9e47e --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go @@ -0,0 +1,723 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tar implements access to tar archives. +// +// Tape archives (tar) are a file format for storing a sequence of files that +// can be read and written in a streaming manner. +// This package aims to cover most variations of the format, +// including those produced by GNU and BSD tar tools. +package tar + +import ( + "errors" + "fmt" + "math" + "os" + "path" + "reflect" + "strconv" + "strings" + "time" +) + +// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit +// architectures. If a large value is encountered when decoding, the result +// stored in Header will be the truncated version. 
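+
+// For illustration, typical client use of this package mirrors the standard
+// library's archive/tar (r is any io.Reader positioned at the start of an
+// archive; error handling is abbreviated):
+//
+//	tr := tar.NewReader(r)
+//	for {
+//		hdr, err := tr.Next()
+//		if err == io.EOF {
+//			break // end of archive
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(hdr.Name)
+//	}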
+ +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errMissData = errors.New("archive/tar: sparse file references non-existent data") + errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") + errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") +) + +type headerError []string + +func (he headerError) Error() string { + const prefix = "archive/tar: cannot encode header" + var ss []string + for _, s := range he { + if s != "" { + ss = append(ss, s) + } + } + if len(ss) == 0 { + return prefix + } + return fmt.Sprintf("%s: %v", prefix, strings.Join(ss, "; and ")) +} + +// Type flags for Header.Typeflag. +const ( + // Type '0' indicates a regular file. + TypeReg = '0' + TypeRegA = '\x00' // Deprecated: Use TypeReg instead. + + // Type '1' to '6' are header-only flags and may not have a data body. + TypeLink = '1' // Hard link + TypeSymlink = '2' // Symbolic link + TypeChar = '3' // Character device node + TypeBlock = '4' // Block device node + TypeDir = '5' // Directory + TypeFifo = '6' // FIFO node + + // Type '7' is reserved. + TypeCont = '7' + + // Type 'x' is used by the PAX format to store key-value records that + // are only relevant to the next file. + // This package transparently handles these types. + TypeXHeader = 'x' + + // Type 'g' is used by the PAX format to store key-value records that + // are relevant to all subsequent files. + // This package only supports parsing and composing such headers, + // but does not currently support persisting the global state across files. + TypeXGlobalHeader = 'g' + + // Type 'S' indicates a sparse file in the GNU format. + TypeGNUSparse = 'S' + + // Types 'L' and 'K' are used by the GNU format for a meta file + // used to store the path or link name for the next file. + // This package transparently handles these types. + TypeGNULongName = 'L' + TypeGNULongLink = 'K' +) + +// Keywords for PAX extended header records. +const ( + paxNone = "" // Indicates that no PAX key is suitable + paxPath = "path" + paxLinkpath = "linkpath" + paxSize = "size" + paxUid = "uid" + paxGid = "gid" + paxUname = "uname" + paxGname = "gname" + paxMtime = "mtime" + paxAtime = "atime" + paxCtime = "ctime" // Removed from later revision of PAX spec, but was valid + paxCharset = "charset" // Currently unused + paxComment = "comment" // Currently unused + + paxSchilyXattr = "SCHILY.xattr." + + // Keywords for GNU sparse files in a PAX extended header. + paxGNUSparse = "GNU.sparse." + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// basicKeys is a set of the PAX keys for which we have built-in support. +// This does not contain "charset" or "comment", which are both PAX-specific, +// so adding them as first-class features of Header is unlikely. +// Users can use the PAXRecords field to set it themselves. 
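+//
+// For illustration (the key is PAX's standard "comment" keyword; the value is
+// hypothetical):
+//
+//	hdr.PAXRecords = map[string]string{"comment": "archived by example-tool"}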
+var basicKeys = map[string]bool{ + paxPath: true, paxLinkpath: true, paxSize: true, paxUid: true, paxGid: true, + paxUname: true, paxGname: true, paxMtime: true, paxAtime: true, paxCtime: true, +} + +// A Header represents a single header in a tar archive. +// Some fields may not be populated. +// +// For forward compatibility, users that retrieve a Header from Reader.Next, +// mutate it in some ways, and then pass it back to Writer.WriteHeader +// should do so by creating a new Header and copying the fields +// that they are interested in preserving. +type Header struct { + // Typeflag is the type of header entry. + // The zero value is automatically promoted to either TypeReg or TypeDir + // depending on the presence of a trailing slash in Name. + Typeflag byte + + Name string // Name of file entry + Linkname string // Target name of link (valid for TypeLink or TypeSymlink) + + Size int64 // Logical file size in bytes + Mode int64 // Permission and mode bits + Uid int // User ID of owner + Gid int // Group ID of owner + Uname string // User name of owner + Gname string // Group name of owner + + // If the Format is unspecified, then Writer.WriteHeader rounds ModTime + // to the nearest second and ignores the AccessTime and ChangeTime fields. + // + // To use AccessTime or ChangeTime, specify the Format as PAX or GNU. + // To use sub-second resolution, specify the Format as PAX. + ModTime time.Time // Modification time + AccessTime time.Time // Access time (requires either PAX or GNU support) + ChangeTime time.Time // Change time (requires either PAX or GNU support) + + Devmajor int64 // Major device number (valid for TypeChar or TypeBlock) + Devminor int64 // Minor device number (valid for TypeChar or TypeBlock) + + // Xattrs stores extended attributes as PAX records under the + // "SCHILY.xattr." namespace. + // + // The following are semantically equivalent: + // h.Xattrs[key] = value + // h.PAXRecords["SCHILY.xattr."+key] = value + // + // When Writer.WriteHeader is called, the contents of Xattrs will take + // precedence over those in PAXRecords. + // + // Deprecated: Use PAXRecords instead. + Xattrs map[string]string + + // PAXRecords is a map of PAX extended header records. + // + // User-defined records should have keys of the following form: + // VENDOR.keyword + // Where VENDOR is some namespace in all uppercase, and keyword may + // not contain the '=' character (e.g., "GOLANG.pkg.version"). + // The key and value should be non-empty UTF-8 strings. + // + // When Writer.WriteHeader is called, PAX records derived from the + // other fields in Header take precedence over PAXRecords. + PAXRecords map[string]string + + // Format specifies the format of the tar header. + // + // This is set by Reader.Next as a best-effort guess at the format. + // Since the Reader liberally reads some non-compliant files, + // it is possible for this to be FormatUnknown. + // + // If the format is unspecified when Writer.WriteHeader is called, + // then it uses the first format (in the order of USTAR, PAX, GNU) + // capable of encoding this Header (see Format). + Format Format +} + +// sparseEntry represents a Length-sized fragment at Offset in the file. +type sparseEntry struct{ Offset, Length int64 } + +func (s sparseEntry) endOffset() int64 { return s.Offset + s.Length } + +// A sparse file can be represented as either a sparseDatas or a sparseHoles. +// As long as the total size is known, they are equivalent and one can be +// converted to the other form and back. 
The various tar formats with sparse
+// file support represent sparse files in the sparseDatas form. That is, they
+// specify the fragments in the file that have data, and treat everything else as
+// having zero bytes. As such, the encoding and decoding logic in this package
+// deals with sparseDatas.
+//
+// However, the external API uses sparseHoles instead of sparseDatas because the
+// zero value of sparseHoles logically represents a normal file (i.e., there are
+// no holes in it). On the other hand, the zero value of sparseDatas implies
+// that the file has no data in it, which is rather odd.
+//
+// As an example, if the underlying raw file contains the 8-byte data:
+//	var compactFile = "abcdefgh"
+//
+// And the sparse map has the following entries:
+//	var spd sparseDatas = []sparseEntry{
+//		{Offset: 2,  Length: 5},  // Data fragment for 2..6
+//		{Offset: 18, Length: 3},  // Data fragment for 18..20
+//	}
+//	var sph sparseHoles = []sparseEntry{
+//		{Offset: 0,  Length: 2},  // Hole fragment for 0..1
+//		{Offset: 7,  Length: 11}, // Hole fragment for 7..17
+//		{Offset: 21, Length: 4},  // Hole fragment for 21..24
+//	}
+//
+// Then the content of the resulting sparse file with a Header.Size of 25 is:
+//	var sparseFile = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type (
+	sparseDatas []sparseEntry
+	sparseHoles []sparseEntry
+)
+
+// validateSparseEntries reports whether sp is a valid sparse map.
+// It does not matter whether sp represents data fragments or hole fragments.
+func validateSparseEntries(sp []sparseEntry, size int64) bool {
+	// Validate all sparse entries. These are the same checks as performed by
+	// the BSD tar utility.
+	if size < 0 {
+		return false
+	}
+	var pre sparseEntry
+	for _, cur := range sp {
+		switch {
+		case cur.Offset < 0 || cur.Length < 0:
+			return false // Negative values are never okay
+		case cur.Offset > math.MaxInt64-cur.Length:
+			return false // Integer overflow with large length
+		case cur.endOffset() > size:
+			return false // Region extends beyond the actual size
+		case pre.endOffset() > cur.Offset:
+			return false // Regions cannot overlap and must be in order
+		}
+		pre = cur
+	}
+	return true
+}
+
+// alignSparseEntries mutates src and returns dst where each fragment's
+// starting offset is aligned up to the nearest block edge, and each
+// ending offset is aligned down to the nearest block edge.
+//
+// Even though the Go tar Reader and the BSD tar utility can handle entries
+// with arbitrary offsets and lengths, the GNU tar utility can only handle
+// offsets and lengths that are multiples of blockSize.
+func alignSparseEntries(src []sparseEntry, size int64) []sparseEntry {
+	dst := src[:0]
+	for _, s := range src {
+		pos, end := s.Offset, s.endOffset()
+		pos += blockPadding(+pos) // Round-up to nearest blockSize
+		if end != size {
+			end -= blockPadding(-end) // Round-down to nearest blockSize
+		}
+		if pos < end {
+			dst = append(dst, sparseEntry{Offset: pos, Length: end - pos})
+		}
+	}
+	return dst
+}
+
+// invertSparseEntries converts a sparse map from one form to the other.
+// If the input is sparseHoles, then it will output sparseDatas and vice-versa.
+// The input must have been already validated.
+// +// This function mutates src and returns a normalized map where: +// * adjacent fragments are coalesced together +// * only the last fragment may be empty +// * the endOffset of the last fragment is the total size +func invertSparseEntries(src []sparseEntry, size int64) []sparseEntry { + dst := src[:0] + var pre sparseEntry + for _, cur := range src { + if cur.Length == 0 { + continue // Skip empty fragments + } + pre.Length = cur.Offset - pre.Offset + if pre.Length > 0 { + dst = append(dst, pre) // Only add non-empty fragments + } + pre.Offset = cur.endOffset() + } + pre.Length = size - pre.Offset // Possibly the only empty fragment + return append(dst, pre) +} + +// fileState tracks the number of logical (includes sparse holes) and physical +// (actual in tar archive) bytes remaining for the current file. +// +// Invariant: LogicalRemaining >= PhysicalRemaining +type fileState interface { + LogicalRemaining() int64 + PhysicalRemaining() int64 +} + +// allowedFormats determines which formats can be used. +// The value returned is the logical OR of multiple possible formats. +// If the value is FormatUnknown, then the input Header cannot be encoded +// and an error is returned explaining why. +// +// As a by-product of checking the fields, this function returns paxHdrs, which +// contain all fields that could not be directly encoded. +// A value receiver ensures that this method does not mutate the source Header. +func (h Header) allowedFormats() (format Format, paxHdrs map[string]string, err error) { + format = FormatUSTAR | FormatPAX | FormatGNU + paxHdrs = make(map[string]string) + + var whyNoUSTAR, whyNoPAX, whyNoGNU string + var preferPAX bool // Prefer PAX over USTAR + verifyString := func(s string, size int, name, paxKey string) { + // NUL-terminator is optional for path and linkpath. + // Technically, it is required for uname and gname, + // but neither GNU nor BSD tar checks for it. 
+ tooLong := len(s) > size + allowLongGNU := paxKey == paxPath || paxKey == paxLinkpath + if hasNUL(s) || (tooLong && !allowLongGNU) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%q", name, s) + format.mustNotBe(FormatGNU) + } + if !isASCII(s) || tooLong { + canSplitUSTAR := paxKey == paxPath + if _, _, ok := splitUSTARPath(s); !canSplitUSTAR || !ok { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%q", name, s) + format.mustNotBe(FormatUSTAR) + } + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%q", name, s) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = s + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == s { + paxHdrs[paxKey] = v + } + } + verifyNumeric := func(n int64, size int, name, paxKey string) { + if !fitsInBase256(size, n) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%d", name, n) + format.mustNotBe(FormatGNU) + } + if !fitsInOctal(size, n) { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%d", name, n) + format.mustNotBe(FormatUSTAR) + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%d", name, n) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = strconv.FormatInt(n, 10) + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == strconv.FormatInt(n, 10) { + paxHdrs[paxKey] = v + } + } + verifyTime := func(ts time.Time, size int, name, paxKey string) { + if ts.IsZero() { + return // Always okay + } + if !fitsInBase256(size, ts.Unix()) { + whyNoGNU = fmt.Sprintf("GNU cannot encode %s=%v", name, ts) + format.mustNotBe(FormatGNU) + } + isMtime := paxKey == paxMtime + fitsOctal := fitsInOctal(size, ts.Unix()) + if (isMtime && !fitsOctal) || !isMtime { + whyNoUSTAR = fmt.Sprintf("USTAR cannot encode %s=%v", name, ts) + format.mustNotBe(FormatUSTAR) + } + needsNano := ts.Nanosecond() != 0 + if !isMtime || !fitsOctal || needsNano { + preferPAX = true // USTAR may truncate sub-second measurements + if paxKey == paxNone { + whyNoPAX = fmt.Sprintf("PAX cannot encode %s=%v", name, ts) + format.mustNotBe(FormatPAX) + } else { + paxHdrs[paxKey] = formatPAXTime(ts) + } + } + if v, ok := h.PAXRecords[paxKey]; ok && v == formatPAXTime(ts) { + paxHdrs[paxKey] = v + } + } + + // Check basic fields. + var blk block + v7 := blk.V7() + ustar := blk.USTAR() + gnu := blk.GNU() + verifyString(h.Name, len(v7.Name()), "Name", paxPath) + verifyString(h.Linkname, len(v7.LinkName()), "Linkname", paxLinkpath) + verifyString(h.Uname, len(ustar.UserName()), "Uname", paxUname) + verifyString(h.Gname, len(ustar.GroupName()), "Gname", paxGname) + verifyNumeric(h.Mode, len(v7.Mode()), "Mode", paxNone) + verifyNumeric(int64(h.Uid), len(v7.UID()), "Uid", paxUid) + verifyNumeric(int64(h.Gid), len(v7.GID()), "Gid", paxGid) + verifyNumeric(h.Size, len(v7.Size()), "Size", paxSize) + verifyNumeric(h.Devmajor, len(ustar.DevMajor()), "Devmajor", paxNone) + verifyNumeric(h.Devminor, len(ustar.DevMinor()), "Devminor", paxNone) + verifyTime(h.ModTime, len(v7.ModTime()), "ModTime", paxMtime) + verifyTime(h.AccessTime, len(gnu.AccessTime()), "AccessTime", paxAtime) + verifyTime(h.ChangeTime, len(gnu.ChangeTime()), "ChangeTime", paxCtime) + + // Check for header-only types. + var whyOnlyPAX, whyOnlyGNU string + switch h.Typeflag { + case TypeReg, TypeChar, TypeBlock, TypeFifo, TypeGNUSparse: + // Exclude TypeLink and TypeSymlink, since they may reference directories. 
+ if strings.HasSuffix(h.Name, "/") { + return FormatUnknown, nil, headerError{"filename may not have trailing slash"} + } + case TypeXHeader, TypeGNULongName, TypeGNULongLink: + return FormatUnknown, nil, headerError{"cannot manually encode TypeXHeader, TypeGNULongName, or TypeGNULongLink headers"} + case TypeXGlobalHeader: + h2 := Header{Name: h.Name, Typeflag: h.Typeflag, Xattrs: h.Xattrs, PAXRecords: h.PAXRecords, Format: h.Format} + if !reflect.DeepEqual(h, h2) { + return FormatUnknown, nil, headerError{"only PAXRecords should be set for TypeXGlobalHeader"} + } + whyOnlyPAX = "only PAX supports TypeXGlobalHeader" + format.mayOnlyBe(FormatPAX) + } + if !isHeaderOnlyType(h.Typeflag) && h.Size < 0 { + return FormatUnknown, nil, headerError{"negative size on header-only type"} + } + + // Check PAX records. + if len(h.Xattrs) > 0 { + for k, v := range h.Xattrs { + paxHdrs[paxSchilyXattr+k] = v + } + whyOnlyPAX = "only PAX supports Xattrs" + format.mayOnlyBe(FormatPAX) + } + if len(h.PAXRecords) > 0 { + for k, v := range h.PAXRecords { + switch _, exists := paxHdrs[k]; { + case exists: + continue // Do not overwrite existing records + case h.Typeflag == TypeXGlobalHeader: + paxHdrs[k] = v // Copy all records + case !basicKeys[k] && !strings.HasPrefix(k, paxGNUSparse): + paxHdrs[k] = v // Ignore local records that may conflict + } + } + whyOnlyPAX = "only PAX supports PAXRecords" + format.mayOnlyBe(FormatPAX) + } + for k, v := range paxHdrs { + if !validPAXRecord(k, v) { + return FormatUnknown, nil, headerError{fmt.Sprintf("invalid PAX record: %q", k+" = "+v)} + } + } + + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + // Check sparse files. + if len(h.SparseHoles) > 0 || h.Typeflag == TypeGNUSparse { + if isHeaderOnlyType(h.Typeflag) { + return FormatUnknown, nil, headerError{"header-only type cannot be sparse"} + } + if !validateSparseEntries(h.SparseHoles, h.Size) { + return FormatUnknown, nil, headerError{"invalid sparse holes"} + } + if h.Typeflag == TypeGNUSparse { + whyOnlyGNU = "only GNU supports TypeGNUSparse" + format.mayOnlyBe(FormatGNU) + } else { + whyNoGNU = "GNU supports sparse files only with TypeGNUSparse" + format.mustNotBe(FormatGNU) + } + whyNoUSTAR = "USTAR does not support sparse files" + format.mustNotBe(FormatUSTAR) + } + */ + + // Check desired format. + if wantFormat := h.Format; wantFormat != FormatUnknown { + if wantFormat.has(FormatPAX) && !preferPAX { + wantFormat.mayBe(FormatUSTAR) // PAX implies USTAR allowed too + } + format.mayOnlyBe(wantFormat) // Set union of formats allowed and format wanted + } + if format == FormatUnknown { + switch h.Format { + case FormatUSTAR: + err = headerError{"Format specifies USTAR", whyNoUSTAR, whyOnlyPAX, whyOnlyGNU} + case FormatPAX: + err = headerError{"Format specifies PAX", whyNoPAX, whyOnlyGNU} + case FormatGNU: + err = headerError{"Format specifies GNU", whyNoGNU, whyOnlyPAX} + default: + err = headerError{whyNoUSTAR, whyNoPAX, whyNoGNU, whyOnlyPAX, whyOnlyGNU} + } + } + return format, paxHdrs, err +} + +// FileInfo returns an os.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. 
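+//
+// For illustration, values of this type come from Header.FileInfo and behave
+// like any os.FileInfo:
+//
+//	fi := hdr.FileInfo()
+//	fmt.Println(fi.Name(), fi.Size(), fi.Mode())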
+type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + mode |= os.ModeSticky + } + + // Set file mode bits; clear perm, setuid, setgid, and sticky bits. + switch m := os.FileMode(fi.h.Mode) &^ 07777; m { + case c_ISDIR: + mode |= os.ModeDir + case c_ISFIFO: + mode |= os.ModeNamedPipe + case c_ISLNK: + mode |= os.ModeSymlink + case c_ISBLK: + mode |= os.ModeDevice + case c_ISCHR: + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case c_ISSOCK: + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeSymlink: + mode |= os.ModeSymlink + case TypeChar: + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + mode |= os.ModeDevice + case TypeDir: + mode |= os.ModeDir + case TypeFifo: + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +const ( + // Mode constants from the USTAR spec: + // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + + // Common Unix mode constants; these are not defined in any common tar standard. + // Header.FileInfo understands these, but FileInfoHeader will never produce these. + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// +// Since os.FileInfo's Name method only returns the base name of +// the file it describes, it may be necessary to modify Header.Name +// to provide the full path name of the file. 
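+//
+// For illustration, a typical call site looks like this (name and relPath
+// are hypothetical variables supplied by the caller):
+//
+//	fi, err := os.Stat(name)
+//	if err != nil {
+//		return err
+//	}
+//	hdr, err := tar.FileInfoHeader(fi, "")
+//	if err != nil {
+//		return err
+//	}
+//	hdr.Name = relPath // restore the full path within the archive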
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("archive/tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Typeflag = TypeChar + } else { + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + case fm&os.ModeSocket != 0: + return nil, fmt.Errorf("archive/tar: sockets not supported") + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + // If possible, populate additional fields from OS-specific + // FileInfo fields. + if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). Use the + // original Header to populate all remaining fields. + h.Uid = sys.Uid + h.Gid = sys.Gid + h.Uname = sys.Uname + h.Gname = sys.Gname + h.AccessTime = sys.AccessTime + h.ChangeTime = sys.ChangeTime + if sys.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range sys.Xattrs { + h.Xattrs[k] = v + } + } + if sys.Typeflag == TypeLink { + // hard link + h.Typeflag = TypeLink + h.Size = 0 + h.Linkname = sys.Linkname + } + if sys.PAXRecords != nil { + h.PAXRecords = make(map[string]string) + for k, v := range sys.PAXRecords { + h.PAXRecords[k] = v + } + } + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +// isHeaderOnlyType checks if the given type flag is of the type that has no +// data section even if a size is specified. +func isHeaderOnlyType(flag byte) bool { + switch flag { + case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: + return true + default: + return false + } +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/format.go b/vendor/github.com/vbatts/tar-split/archive/tar/format.go new file mode 100644 index 00000000..1f89d0c5 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/format.go @@ -0,0 +1,303 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import "strings" + +// Format represents the tar archive format. +// +// The original tar format was introduced in Unix V7. +// Since then, there have been multiple competing formats attempting to +// standardize or extend the V7 format to overcome its limitations. +// The most common formats are the USTAR, PAX, and GNU formats, +// each with their own advantages and limitations. 
+// +// The following table captures the capabilities of each format: +// +// | USTAR | PAX | GNU +// ------------------+--------+-----------+---------- +// Name | 256B | unlimited | unlimited +// Linkname | 100B | unlimited | unlimited +// Size | uint33 | unlimited | uint89 +// Mode | uint21 | uint21 | uint57 +// Uid/Gid | uint21 | unlimited | uint57 +// Uname/Gname | 32B | unlimited | 32B +// ModTime | uint33 | unlimited | int89 +// AccessTime | n/a | unlimited | int89 +// ChangeTime | n/a | unlimited | int89 +// Devmajor/Devminor | uint21 | uint21 | uint57 +// ------------------+--------+-----------+---------- +// string encoding | ASCII | UTF-8 | binary +// sub-second times | no | yes | no +// sparse files | no | yes | yes +// +// The table's upper portion shows the Header fields, where each format reports +// the maximum number of bytes allowed for each string field and +// the integer type used to store each numeric field +// (where timestamps are stored as the number of seconds since the Unix epoch). +// +// The table's lower portion shows specialized features of each format, +// such as supported string encodings, support for sub-second timestamps, +// or support for sparse files. +// +// The Writer currently provides no support for sparse files. +type Format int + +// Constants to identify various tar formats. +const ( + // Deliberately hide the meaning of constants from public API. + _ Format = (1 << iota) / 4 // Sequence of 0, 0, 1, 2, 4, 8, etc... + + // FormatUnknown indicates that the format is unknown. + FormatUnknown + + // The format of the original Unix V7 tar tool prior to standardization. + formatV7 + + // FormatUSTAR represents the USTAR header format defined in POSIX.1-1988. + // + // While this format is compatible with most tar readers, + // the format has several limitations making it unsuitable for some usages. + // Most notably, it cannot support sparse files, files larger than 8GiB, + // filenames larger than 256 characters, and non-ASCII filenames. + // + // Reference: + // http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 + FormatUSTAR + + // FormatPAX represents the PAX header format defined in POSIX.1-2001. + // + // PAX extends USTAR by writing a special file with Typeflag TypeXHeader + // preceding the original header. This file contains a set of key-value + // records, which are used to overcome USTAR's shortcomings, in addition to + // providing the ability to have sub-second resolution for timestamps. + // + // Some newer formats add their own extensions to PAX by defining their + // own keys and assigning certain semantic meaning to the associated values. + // For example, sparse file support in PAX is implemented using keys + // defined by the GNU manual (e.g., "GNU.sparse.map"). + // + // Reference: + // http://pubs.opengroup.org/onlinepubs/009695399/utilities/pax.html + FormatPAX + + // FormatGNU represents the GNU header format. + // + // The GNU header format is older than the USTAR and PAX standards and + // is not compatible with them. The GNU format supports + // arbitrary file sizes, filenames of arbitrary encoding and length, + // sparse files, and other features. + // + // It is recommended that PAX be chosen over GNU unless the target + // application can only parse GNU formatted archives. + // + // Reference: + // https://www.gnu.org/software/tar/manual/html_node/Standard.html + FormatGNU + + // Schily's tar format, which is incompatible with USTAR. 
+ // This does not cover STAR extensions to the PAX format; these fall under + // the PAX format. + formatSTAR + + formatMax +) + +func (f Format) has(f2 Format) bool { return f&f2 != 0 } +func (f *Format) mayBe(f2 Format) { *f |= f2 } +func (f *Format) mayOnlyBe(f2 Format) { *f &= f2 } +func (f *Format) mustNotBe(f2 Format) { *f &^= f2 } + +var formatNames = map[Format]string{ + formatV7: "V7", FormatUSTAR: "USTAR", FormatPAX: "PAX", FormatGNU: "GNU", formatSTAR: "STAR", +} + +func (f Format) String() string { + var ss []string + for f2 := Format(1); f2 < formatMax; f2 <<= 1 { + if f.has(f2) { + ss = append(ss, formatNames[f2]) + } + } + switch len(ss) { + case 0: + return "" + case 1: + return ss[0] + default: + return "(" + strings.Join(ss, " | ") + ")" + } +} + +// Magics used to identify various formats. +const ( + magicGNU, versionGNU = "ustar ", " \x00" + magicUSTAR, versionUSTAR = "ustar\x00", "00" + trailerSTAR = "tar\x00" +) + +// Size constants from various tar specifications. +const ( + blockSize = 512 // Size of each block in a tar stream + nameSize = 100 // Max length of the name field in USTAR format + prefixSize = 155 // Max length of the prefix field in USTAR format +) + +// blockPadding computes the number of bytes needed to pad offset up to the +// nearest block edge where 0 <= n < blockSize. +func blockPadding(offset int64) (n int64) { + return -offset & (blockSize - 1) +} + +var zeroBlock block + +type block [blockSize]byte + +// Convert block to any number of formats. +func (b *block) V7() *headerV7 { return (*headerV7)(b) } +func (b *block) GNU() *headerGNU { return (*headerGNU)(b) } +func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) } +func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) } +func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) } + +// GetFormat checks that the block is a valid tar header based on the checksum. +// It then attempts to guess the specific format based on magic values. +// If the checksum fails, then FormatUnknown is returned. +func (b *block) GetFormat() Format { + // Verify checksum. + var p parser + value := p.parseOctal(b.V7().Chksum()) + chksum1, chksum2 := b.ComputeChecksum() + if p.err != nil || (value != chksum1 && value != chksum2) { + return FormatUnknown + } + + // Guess the magic values. + magic := string(b.USTAR().Magic()) + version := string(b.USTAR().Version()) + trailer := string(b.STAR().Trailer()) + switch { + case magic == magicUSTAR && trailer == trailerSTAR: + return formatSTAR + case magic == magicUSTAR: + return FormatUSTAR | FormatPAX + case magic == magicGNU && version == versionGNU: + return FormatGNU + default: + return formatV7 + } +} + +// SetFormat writes the magic values necessary for specified format +// and then updates the checksum accordingly. +func (b *block) SetFormat(format Format) { + // Set the magic values. + switch { + case format.has(formatV7): + // Do nothing. + case format.has(FormatGNU): + copy(b.GNU().Magic(), magicGNU) + copy(b.GNU().Version(), versionGNU) + case format.has(formatSTAR): + copy(b.STAR().Magic(), magicUSTAR) + copy(b.STAR().Version(), versionUSTAR) + copy(b.STAR().Trailer(), trailerSTAR) + case format.has(FormatUSTAR | FormatPAX): + copy(b.USTAR().Magic(), magicUSTAR) + copy(b.USTAR().Version(), versionUSTAR) + default: + panic("invalid format") + } + + // Update checksum. + // This field is special in that it is terminated by a NULL then space. 
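+	// For a hypothetical checksum of 0o12146, the encoded field is thus
+	// "012146\x00 ": six zero-padded octal digits, a NUL, then a space.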
+ var f formatter + field := b.V7().Chksum() + chksum, _ := b.ComputeChecksum() // Possible values are 256..128776 + f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143 + field[7] = ' ' +} + +// ComputeChecksum computes the checksum for the header block. +// POSIX specifies a sum of the unsigned byte values, but the Sun tar used +// signed byte values. +// We compute and return both. +func (b *block) ComputeChecksum() (unsigned, signed int64) { + for i, c := range b { + if 148 <= i && i < 156 { + c = ' ' // Treat the checksum field itself as all spaces. + } + unsigned += int64(c) + signed += int64(int8(c)) + } + return unsigned, signed +} + +// Reset clears the block with all zeros. +func (b *block) Reset() { + *b = block{} +} + +type headerV7 [blockSize]byte + +func (h *headerV7) Name() []byte { return h[000:][:100] } +func (h *headerV7) Mode() []byte { return h[100:][:8] } +func (h *headerV7) UID() []byte { return h[108:][:8] } +func (h *headerV7) GID() []byte { return h[116:][:8] } +func (h *headerV7) Size() []byte { return h[124:][:12] } +func (h *headerV7) ModTime() []byte { return h[136:][:12] } +func (h *headerV7) Chksum() []byte { return h[148:][:8] } +func (h *headerV7) TypeFlag() []byte { return h[156:][:1] } +func (h *headerV7) LinkName() []byte { return h[157:][:100] } + +type headerGNU [blockSize]byte + +func (h *headerGNU) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerGNU) Magic() []byte { return h[257:][:6] } +func (h *headerGNU) Version() []byte { return h[263:][:2] } +func (h *headerGNU) UserName() []byte { return h[265:][:32] } +func (h *headerGNU) GroupName() []byte { return h[297:][:32] } +func (h *headerGNU) DevMajor() []byte { return h[329:][:8] } +func (h *headerGNU) DevMinor() []byte { return h[337:][:8] } +func (h *headerGNU) AccessTime() []byte { return h[345:][:12] } +func (h *headerGNU) ChangeTime() []byte { return h[357:][:12] } +func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) } +func (h *headerGNU) RealSize() []byte { return h[483:][:12] } + +type headerSTAR [blockSize]byte + +func (h *headerSTAR) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerSTAR) Magic() []byte { return h[257:][:6] } +func (h *headerSTAR) Version() []byte { return h[263:][:2] } +func (h *headerSTAR) UserName() []byte { return h[265:][:32] } +func (h *headerSTAR) GroupName() []byte { return h[297:][:32] } +func (h *headerSTAR) DevMajor() []byte { return h[329:][:8] } +func (h *headerSTAR) DevMinor() []byte { return h[337:][:8] } +func (h *headerSTAR) Prefix() []byte { return h[345:][:131] } +func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] } +func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] } +func (h *headerSTAR) Trailer() []byte { return h[508:][:4] } + +type headerUSTAR [blockSize]byte + +func (h *headerUSTAR) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerUSTAR) Magic() []byte { return h[257:][:6] } +func (h *headerUSTAR) Version() []byte { return h[263:][:2] } +func (h *headerUSTAR) UserName() []byte { return h[265:][:32] } +func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] } +func (h *headerUSTAR) DevMajor() []byte { return h[329:][:8] } +func (h *headerUSTAR) DevMinor() []byte { return h[337:][:8] } +func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] } + +type sparseArray []byte + +func (s sparseArray) Entry(i int) sparseElem { return (sparseElem)(s[i*24:]) } +func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] } +func (s 
sparseArray) MaxEntries() int { return len(s) / 24 } + +type sparseElem []byte + +func (s sparseElem) Offset() []byte { return s[00:][:12] } +func (s sparseElem) Length() []byte { return s[12:][:12] } diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go new file mode 100644 index 00000000..ea64a382 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -0,0 +1,923 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "io" + "io/ioutil" + "strconv" + "strings" + "time" +) + +// Reader provides sequential access to the contents of a tar archive. +// Reader.Next advances to the next file in the archive (including the first), +// and then Reader can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + pad int64 // Amount of padding (ignored) after current file entry + curr fileReader // Reader for current file entry + blk block // Buffer to use as temporary local storage + + // err is a persistent error. + // It is only the responsibility of every exported method of Reader to + // ensure that this error is sticky. + err error + + RawAccounting bool // Whether to enable the access needed to reassemble the tar from raw bytes. Some performance/memory hit for this. + rawBytes *bytes.Buffer // last raw bits +} + +type fileReader interface { + io.Reader + fileState + + WriteTo(io.Writer) (int64, error) +} + +// RawBytes accesses the raw bytes of the archive, apart from the file payload itself. +// This includes the header and padding. +// +// This call resets the current rawbytes buffer +// +// Only when RawAccounting is enabled, otherwise this returns nil +func (tr *Reader) RawBytes() []byte { + if !tr.RawAccounting { + return nil + } + if tr.rawBytes == nil { + tr.rawBytes = bytes.NewBuffer(nil) + } + defer tr.rawBytes.Reset() // if we've read them, then flush them. + + return tr.rawBytes.Bytes() + +} + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { + return &Reader{r: r, curr: ®FileReader{r, 0}} +} + +// Next advances to the next entry in the tar archive. +// The Header.Size determines how many bytes can be read for the next file. +// Any remaining data in the current file is automatically discarded. +// +// io.EOF is returned at the end of the input. +func (tr *Reader) Next() (*Header, error) { + if tr.err != nil { + return nil, tr.err + } + hdr, err := tr.next() + tr.err = err + return hdr, err +} + +func (tr *Reader) next() (*Header, error) { + var paxHdrs map[string]string + var gnuLongName, gnuLongLink string + + if tr.RawAccounting { + if tr.rawBytes == nil { + tr.rawBytes = bytes.NewBuffer(nil) + } else { + tr.rawBytes.Reset() + } + } + + // Externally, Next iterates through the tar archive as if it is a series of + // files. Internally, the tar format often uses fake "files" to add meta + // data that describes the next file. These meta data "files" should not + // normally be visible to the outside. As such, this loop iterates through + // one or more "header files" until it finds a "normal file". + format := FormatUSTAR | FormatPAX | FormatGNU + for { + // Discard the remainder of the file and any padding. 
+ if err := discard(tr, tr.curr.PhysicalRemaining()); err != nil { + return nil, err + } + n, err := tryReadFull(tr.r, tr.blk[:tr.pad]) + if err != nil { + return nil, err + } + if tr.RawAccounting { + tr.rawBytes.Write(tr.blk[:n]) + } + tr.pad = 0 + + hdr, rawHdr, err := tr.readHeader() + if err != nil { + return nil, err + } + if err := tr.handleRegularFile(hdr); err != nil { + return nil, err + } + format.mayOnlyBe(hdr.Format) + + // Check for PAX/GNU special headers and files. + switch hdr.Typeflag { + case TypeXHeader, TypeXGlobalHeader: + format.mayOnlyBe(FormatPAX) + paxHdrs, err = parsePAX(tr) + if err != nil { + return nil, err + } + if hdr.Typeflag == TypeXGlobalHeader { + mergePAX(hdr, paxHdrs) + return &Header{ + Name: hdr.Name, + Typeflag: hdr.Typeflag, + Xattrs: hdr.Xattrs, + PAXRecords: hdr.PAXRecords, + Format: format, + }, nil + } + continue // This is a meta header affecting the next header + case TypeGNULongName, TypeGNULongLink: + format.mayOnlyBe(FormatGNU) + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + + if tr.RawAccounting { + tr.rawBytes.Write(realname) + } + + var p parser + switch hdr.Typeflag { + case TypeGNULongName: + gnuLongName = p.parseString(realname) + case TypeGNULongLink: + gnuLongLink = p.parseString(realname) + } + continue // This is a meta header affecting the next header + default: + // The old GNU sparse format is handled here since it is technically + // just a regular file with additional attributes. + + if err := mergePAX(hdr, paxHdrs); err != nil { + return nil, err + } + if gnuLongName != "" { + hdr.Name = gnuLongName + } + if gnuLongLink != "" { + hdr.Linkname = gnuLongLink + } + if hdr.Typeflag == TypeRegA { + if strings.HasSuffix(hdr.Name, "/") { + hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories + } else { + hdr.Typeflag = TypeReg + } + } + + // The extended headers may have updated the size. + // Thus, setup the regFileReader again after merging PAX headers. + if err := tr.handleRegularFile(hdr); err != nil { + return nil, err + } + + // Sparse formats rely on being able to read from the logical data + // section; there must be a preceding call to handleRegularFile. + if err := tr.handleSparseFile(hdr, rawHdr); err != nil { + return nil, err + } + + // Set the final guess at the format. + if format.has(FormatUSTAR) && format.has(FormatPAX) { + format.mayOnlyBe(FormatUSTAR) + } + hdr.Format = format + return hdr, nil // This is a file, so stop + } + } +} + +// handleRegularFile sets up the current file reader and padding such that it +// can only read the following logical data section. It will properly handle +// special headers that contain no data section. +func (tr *Reader) handleRegularFile(hdr *Header) error { + nb := hdr.Size + if isHeaderOnlyType(hdr.Typeflag) { + nb = 0 + } + if nb < 0 { + return ErrHeader + } + + tr.pad = blockPadding(nb) + tr.curr = ®FileReader{r: tr.r, nb: nb} + return nil +} + +// handleSparseFile checks if the current file is a sparse format of any type +// and sets the curr reader appropriately. +func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error { + var spd sparseDatas + var err error + if hdr.Typeflag == TypeGNUSparse { + spd, err = tr.readOldGNUSparseMap(hdr, rawHdr) + } else { + spd, err = tr.readGNUSparsePAXHeaders(hdr) + } + + // If sp is non-nil, then this is a sparse file. + // Note that it is possible for len(sp) == 0. 
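+	// (Hypothetical example: for a 10240-byte sparse file whose only data
+	// fragment is {Offset: 2048, Length: 512}, invertSparseEntries yields the
+	// holes {0, 2048} and {2560, 7680}, which read back as NULs.)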
+ if err == nil && spd != nil { + if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) { + return ErrHeader + } + sph := invertSparseEntries(spd, hdr.Size) + tr.curr = &sparseFileReader{tr.curr, sph, 0} + } + return err +} + +// readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. +// If they are found, then this function reads the sparse map and returns it. +// This assumes that 0.0 headers have already been converted to 0.1 headers +// by the PAX header parsing logic. +func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) { + // Identify the version of GNU headers. + var is1x0 bool + major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor] + switch { + case major == "0" && (minor == "0" || minor == "1"): + is1x0 = false + case major == "1" && minor == "0": + is1x0 = true + case major != "" || minor != "": + return nil, nil // Unknown GNU sparse PAX version + case hdr.PAXRecords[paxGNUSparseMap] != "": + is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess + default: + return nil, nil // Not a PAX format GNU sparse file. + } + hdr.Format.mayOnlyBe(FormatPAX) + + // Update hdr from GNU sparse PAX headers. + if name := hdr.PAXRecords[paxGNUSparseName]; name != "" { + hdr.Name = name + } + size := hdr.PAXRecords[paxGNUSparseSize] + if size == "" { + size = hdr.PAXRecords[paxGNUSparseRealSize] + } + if size != "" { + n, err := strconv.ParseInt(size, 10, 64) + if err != nil { + return nil, ErrHeader + } + hdr.Size = n + } + + // Read the sparse map according to the appropriate format. + if is1x0 { + return readGNUSparseMap1x0(tr.curr) + } + return readGNUSparseMap0x1(hdr.PAXRecords) +} + +// mergePAX merges paxHdrs into hdr for all relevant fields of Header. +func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { + for k, v := range paxHdrs { + if v == "" { + continue // Keep the original USTAR value + } + var id64 int64 + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxUname: + hdr.Uname = v + case paxGname: + hdr.Gname = v + case paxUid: + id64, err = strconv.ParseInt(v, 10, 64) + hdr.Uid = int(id64) // Integer overflow possible + case paxGid: + id64, err = strconv.ParseInt(v, 10, 64) + hdr.Gid = int(id64) // Integer overflow possible + case paxAtime: + hdr.AccessTime, err = parsePAXTime(v) + case paxMtime: + hdr.ModTime, err = parsePAXTime(v) + case paxCtime: + hdr.ChangeTime, err = parsePAXTime(v) + case paxSize: + hdr.Size, err = strconv.ParseInt(v, 10, 64) + default: + if strings.HasPrefix(k, paxSchilyXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxSchilyXattr):]] = v + } + } + if err != nil { + return ErrHeader + } + } + hdr.PAXRecords = paxHdrs + return nil +} + +// parsePAX parses PAX headers. +// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + // leaving this function for io.Reader makes it more testable + if tr, ok := r.(*Reader); ok && tr.RawAccounting { + if _, err = tr.rawBytes.Write(buf); err != nil { + return nil, err + } + } + sbuf := string(buf) + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into format 0.1 + // headers since 0.0 headers were not PAX compliant. 
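+	// For example (values are illustrative), the format 0.0 records
+	//	GNU.sparse.offset=0
+	//	GNU.sparse.numbytes=512
+	//	GNU.sparse.offset=2048
+	//	GNU.sparse.numbytes=512
+	// are folded below into the single format 0.1 record
+	//	GNU.sparse.map=0,512,2048,512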
+ var sparseMap []string + + paxHdrs := make(map[string]string) + for len(sbuf) > 0 { + key, value, residual, err := parsePAXRecord(sbuf) + if err != nil { + return nil, ErrHeader + } + sbuf = residual + + switch key { + case paxGNUSparseOffset, paxGNUSparseNumBytes: + // Validate sparse header order and value. + if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) || + (len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) || + strings.Contains(value, ",") { + return nil, ErrHeader + } + sparseMap = append(sparseMap, value) + default: + paxHdrs[key] = value + } + } + if len(sparseMap) > 0 { + paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",") + } + return paxHdrs, nil +} + +// readHeader reads the next block header and assumes that the underlying reader +// is already aligned to a block boundary. It returns the raw block of the +// header in case further processing is required. +// +// The err will be set to io.EOF only when one of the following occurs: +// * Exactly 0 bytes are read and EOF is hit. +// * Exactly 1 block of zeros is read and EOF is hit. +// * At least 2 blocks of zeros are read. +func (tr *Reader) readHeader() (*Header, *block, error) { + // Two blocks of zero bytes marks the end of the archive. + n, err := io.ReadFull(tr.r, tr.blk[:]) + if tr.RawAccounting && (err == nil || err == io.EOF) { + tr.rawBytes.Write(tr.blk[:n]) + } + if err != nil { + return nil, nil, err // EOF is okay here; exactly 0 bytes read + } + + if bytes.Equal(tr.blk[:], zeroBlock[:]) { + n, err = io.ReadFull(tr.r, tr.blk[:]) + if tr.RawAccounting && (err == nil || err == io.EOF) { + tr.rawBytes.Write(tr.blk[:n]) + } + if err != nil { + return nil, nil, err // EOF is okay here; exactly 1 block of zeros read + } + if bytes.Equal(tr.blk[:], zeroBlock[:]) { + return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read + } + return nil, nil, ErrHeader // Zero block and then non-zero block + } + + // Verify the header matches a known format. + format := tr.blk.GetFormat() + if format == FormatUnknown { + return nil, nil, ErrHeader + } + + var p parser + hdr := new(Header) + + // Unpack the V7 header. + v7 := tr.blk.V7() + hdr.Typeflag = v7.TypeFlag()[0] + hdr.Name = p.parseString(v7.Name()) + hdr.Linkname = p.parseString(v7.LinkName()) + hdr.Size = p.parseNumeric(v7.Size()) + hdr.Mode = p.parseNumeric(v7.Mode()) + hdr.Uid = int(p.parseNumeric(v7.UID())) + hdr.Gid = int(p.parseNumeric(v7.GID())) + hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0) + + // Unpack format specific fields. + if format > formatV7 { + ustar := tr.blk.USTAR() + hdr.Uname = p.parseString(ustar.UserName()) + hdr.Gname = p.parseString(ustar.GroupName()) + hdr.Devmajor = p.parseNumeric(ustar.DevMajor()) + hdr.Devminor = p.parseNumeric(ustar.DevMinor()) + + var prefix string + switch { + case format.has(FormatUSTAR | FormatPAX): + hdr.Format = format + ustar := tr.blk.USTAR() + prefix = p.parseString(ustar.Prefix()) + + // For Format detection, check if block is properly formatted since + // the parser is more liberal than what USTAR actually permits. + notASCII := func(r rune) bool { return r >= 0x80 } + if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 { + hdr.Format = FormatUnknown // Non-ASCII characters in block. 
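+				// (The raw USTAR/PAX header block is expected to be 7-bit
+				// ASCII; e.g., a uname containing UTF-8 bytes lands here.)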
+ } + nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 } + if !(nul(v7.Size()) && nul(v7.Mode()) && nul(v7.UID()) && nul(v7.GID()) && + nul(v7.ModTime()) && nul(ustar.DevMajor()) && nul(ustar.DevMinor())) { + hdr.Format = FormatUnknown // Numeric fields must end in NUL + } + case format.has(formatSTAR): + star := tr.blk.STAR() + prefix = p.parseString(star.Prefix()) + hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0) + hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0) + case format.has(FormatGNU): + hdr.Format = format + var p2 parser + gnu := tr.blk.GNU() + if b := gnu.AccessTime(); b[0] != 0 { + hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0) + } + if b := gnu.ChangeTime(); b[0] != 0 { + hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0) + } + + // Prior to Go1.8, the Writer had a bug where it would output + // an invalid tar file in certain rare situations because the logic + // incorrectly believed that the old GNU format had a prefix field. + // This is wrong and leads to an output file that mangles the + // atime and ctime fields, which are often left unused. + // + // In order to continue reading tar files created by former, buggy + // versions of Go, we skeptically parse the atime and ctime fields. + // If we are unable to parse them and the prefix field looks like + // an ASCII string, then we fallback on the pre-Go1.8 behavior + // of treating these fields as the USTAR prefix field. + // + // Note that this will not use the fallback logic for all possible + // files generated by a pre-Go1.8 toolchain. If the generated file + // happened to have a prefix field that parses as valid + // atime and ctime fields (e.g., when they are valid octal strings), + // then it is impossible to distinguish between an valid GNU file + // and an invalid pre-Go1.8 file. + // + // See https://golang.org/issues/12594 + // See https://golang.org/issues/21005 + if p2.err != nil { + hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{} + ustar := tr.blk.USTAR() + if s := p.parseString(ustar.Prefix()); isASCII(s) { + prefix = s + } + hdr.Format = FormatUnknown // Buggy file is not GNU + } + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + return hdr, &tr.blk, p.err +} + +// readOldGNUSparseMap reads the sparse map from the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. +// If it's larger than four entries, then one or more extension headers are used +// to store the rest of the sparse map. +// +// The Header.Size does not reflect the size of any extended headers used. +// Thus, this function will read from the raw io.Reader to fetch extra headers. +// This method mutates blk in the process. +func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) { + // Make sure that the input format is GNU. + // Unfortunately, the STAR format also has a sparse header format that uses + // the same type flag but has a completely different layout. + if blk.GetFormat() != FormatGNU { + return nil, ErrHeader + } + hdr.Format.mayOnlyBe(FormatGNU) + + var p parser + hdr.Size = p.parseNumeric(blk.GNU().RealSize()) + if p.err != nil { + return nil, p.err + } + s := blk.GNU().Sparse() + spd := make(sparseDatas, 0, s.MaxEntries()) + for { + for i := 0; i < s.MaxEntries(); i++ { + // This termination condition is identical to GNU and BSD tar. 
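+			// (E.g., a header advertising two fragments leaves sparse entries
+			// 2 and 3 zero-filled, so entry 2's offset begins with 0x00 and
+			// the loop exits there.)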
+ if s.Entry(i).Offset()[0] == 0x00 { + break // Don't return, need to process extended headers (even if empty) + } + offset := p.parseNumeric(s.Entry(i).Offset()) + length := p.parseNumeric(s.Entry(i).Length()) + if p.err != nil { + return nil, p.err + } + spd = append(spd, sparseEntry{Offset: offset, Length: length}) + } + + if s.IsExtended()[0] > 0 { + // There are more entries. Read an extension header and parse its entries. + if _, err := mustReadFull(tr.r, blk[:]); err != nil { + return nil, err + } + if tr.RawAccounting { + tr.rawBytes.Write(blk[:]) + } + s = blk.Sparse() + continue + } + return spd, nil // Done + } +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format +// version 1.0. The format of the sparse map consists of a series of +// newline-terminated numeric fields. The first field is the number of entries +// and is always present. Following this are the entries, consisting of two +// fields (offset, length). This function must stop reading at the end +// boundary of the block containing the last newline. +// +// Note that the GNU manual says that numeric values should be encoded in octal +// format. However, the GNU tar utility itself outputs these values in decimal. +// As such, this library treats values as being encoded in decimal. +func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { + var ( + cntNewline int64 + buf bytes.Buffer + blk block + ) + + // feedTokens copies data in blocks from r into buf until there are + // at least cnt newlines in buf. It will not read more blocks than needed. + feedTokens := func(n int64) error { + for cntNewline < n { + if _, err := mustReadFull(r, blk[:]); err != nil { + return err + } + buf.Write(blk[:]) + for _, c := range blk { + if c == '\n' { + cntNewline++ + } + } + } + return nil + } + + // nextToken gets the next token delimited by a newline. This assumes that + // at least one newline exists in the buffer. + nextToken := func() string { + cntNewline-- + tok, _ := buf.ReadString('\n') + return strings.TrimRight(tok, "\n") + } + + // Parse for the number of entries. + // Use integer overflow resistant math to check this. + if err := feedTokens(1); err != nil { + return nil, err + } + numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // Parse for all member entries. + // numEntries is trusted after this since a potential attacker must have + // committed resources proportional to what this library used. + if err := feedTokens(2 * numEntries); err != nil { + return nil, err + } + spd := make(sparseDatas, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err1 := strconv.ParseInt(nextToken(), 10, 64) + length, err2 := strconv.ParseInt(nextToken(), 10, 64) + if err1 != nil || err2 != nil { + return nil, ErrHeader + } + spd = append(spd, sparseEntry{Offset: offset, Length: length}) + } + return spd, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format +// version 0.1. The sparse map is stored in the PAX headers. +func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) { + // Get number of entries. + // Use integer overflow resistant math to check this. 
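+	// (E.g., on a 64-bit build a hostile numEntries of 1<<62 makes
+	// 2*numEntries wrap negative, so the int(2*numEntries) < int(numEntries)
+	// check below rejects it before numEntries sizes any allocation.)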
+ numEntriesStr := paxHdrs[paxGNUSparseNumBlocks] + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // There should be two numbers in sparseMap for each entry. + sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",") + if len(sparseMap) == 1 && sparseMap[0] == "" { + sparseMap = sparseMap[:0] + } + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map. + // numEntries is trusted now. + spd := make(sparseDatas, 0, numEntries) + for len(sparseMap) >= 2 { + offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64) + length, err2 := strconv.ParseInt(sparseMap[1], 10, 64) + if err1 != nil || err2 != nil { + return nil, ErrHeader + } + spd = append(spd, sparseEntry{Offset: offset, Length: length}) + sparseMap = sparseMap[2:] + } + return spd, nil +} + +// Read reads from the current file in the tar archive. +// It returns (0, io.EOF) when it reaches the end of that file, +// until Next is called to advance to the next file. +// +// If the current file is sparse, then the regions marked as a hole +// are read back as NUL-bytes. +// +// Calling Read on special types like TypeLink, TypeSymlink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what +// the Header.Size claims. +func (tr *Reader) Read(b []byte) (int, error) { + if tr.err != nil { + return 0, tr.err + } + n, err := tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return n, err +} + +// writeTo writes the content of the current file to w. +// The bytes written matches the number of remaining bytes in the current file. +// +// If the current file is sparse and w is an io.WriteSeeker, +// then writeTo uses Seek to skip past holes defined in Header.SparseHoles, +// assuming that skipped regions are filled with NULs. +// This always writes the last byte to ensure w is the right size. +// +// TODO(dsnet): Re-export this when adding sparse file support. +// See https://golang.org/issue/22735 +func (tr *Reader) writeTo(w io.Writer) (int64, error) { + if tr.err != nil { + return 0, tr.err + } + n, err := tr.curr.WriteTo(w) + if err != nil { + tr.err = err + } + return n, err +} + +// regFileReader is a fileReader for reading data from a regular file entry. +type regFileReader struct { + r io.Reader // Underlying Reader + nb int64 // Number of remaining bytes to read +} + +func (fr *regFileReader) Read(b []byte) (n int, err error) { + if int64(len(b)) > fr.nb { + b = b[:fr.nb] + } + if len(b) > 0 { + n, err = fr.r.Read(b) + fr.nb -= int64(n) + } + switch { + case err == io.EOF && fr.nb > 0: + return n, io.ErrUnexpectedEOF + case err == nil && fr.nb == 0: + return n, io.EOF + default: + return n, err + } +} + +func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) { + return io.Copy(w, struct{ io.Reader }{fr}) +} + +func (fr regFileReader) LogicalRemaining() int64 { + return fr.nb +} + +func (fr regFileReader) PhysicalRemaining() int64 { + return fr.nb +} + +// sparseFileReader is a fileReader for reading data from a sparse file entry. 
+type sparseFileReader struct { + fr fileReader // Underlying fileReader + sp sparseHoles // Normalized list of sparse holes + pos int64 // Current position in sparse file +} + +func (sr *sparseFileReader) Read(b []byte) (n int, err error) { + finished := int64(len(b)) >= sr.LogicalRemaining() + if finished { + b = b[:sr.LogicalRemaining()] + } + + b0 := b + endPos := sr.pos + int64(len(b)) + for endPos > sr.pos && err == nil { + var nf int // Bytes read in fragment + holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() + if sr.pos < holeStart { // In a data fragment + bf := b[:min(int64(len(b)), holeStart-sr.pos)] + nf, err = tryReadFull(sr.fr, bf) + } else { // In a hole fragment + bf := b[:min(int64(len(b)), holeEnd-sr.pos)] + nf, err = tryReadFull(zeroReader{}, bf) + } + b = b[nf:] + sr.pos += int64(nf) + if sr.pos >= holeEnd && len(sr.sp) > 1 { + sr.sp = sr.sp[1:] // Ensure last fragment always remains + } + } + + n = len(b0) - len(b) + switch { + case err == io.EOF: + return n, errMissData // Less data in dense file than sparse file + case err != nil: + return n, err + case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: + return n, errUnrefData // More data in dense file than sparse file + case finished: + return n, io.EOF + default: + return n, nil + } +} + +func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) { + ws, ok := w.(io.WriteSeeker) + if ok { + if _, err := ws.Seek(0, io.SeekCurrent); err != nil { + ok = false // Not all io.Seeker can really seek + } + } + if !ok { + return io.Copy(w, struct{ io.Reader }{sr}) + } + + var writeLastByte bool + pos0 := sr.pos + for sr.LogicalRemaining() > 0 && !writeLastByte && err == nil { + var nf int64 // Size of fragment + holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset() + if sr.pos < holeStart { // In a data fragment + nf = holeStart - sr.pos + nf, err = io.CopyN(ws, sr.fr, nf) + } else { // In a hole fragment + nf = holeEnd - sr.pos + if sr.PhysicalRemaining() == 0 { + writeLastByte = true + nf-- + } + _, err = ws.Seek(nf, io.SeekCurrent) + } + sr.pos += nf + if sr.pos >= holeEnd && len(sr.sp) > 1 { + sr.sp = sr.sp[1:] // Ensure last fragment always remains + } + } + + // If the last fragment is a hole, then seek to 1-byte before EOF, and + // write a single byte to ensure the file is the right size. + if writeLastByte && err == nil { + _, err = ws.Write([]byte{0}) + sr.pos++ + } + + n = sr.pos - pos0 + switch { + case err == io.EOF: + return n, errMissData // Less data in dense file than sparse file + case err != nil: + return n, err + case sr.LogicalRemaining() == 0 && sr.PhysicalRemaining() > 0: + return n, errUnrefData // More data in dense file than sparse file + default: + return n, nil + } +} + +func (sr sparseFileReader) LogicalRemaining() int64 { + return sr.sp[len(sr.sp)-1].endOffset() - sr.pos +} +func (sr sparseFileReader) PhysicalRemaining() int64 { + return sr.fr.PhysicalRemaining() +} + +type zeroReader struct{} + +func (zeroReader) Read(b []byte) (int, error) { + for i := range b { + b[i] = 0 + } + return len(b), nil +} + +// mustReadFull is like io.ReadFull except it returns +// io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read. +func mustReadFull(r io.Reader, b []byte) (int, error) { + n, err := tryReadFull(r, b) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return n, err +} + +// tryReadFull is like io.ReadFull except it returns +// io.EOF when it is hit before len(b) bytes are read. 
+func tryReadFull(r io.Reader, b []byte) (n int, err error) { + for len(b) > n && err == nil { + var nn int + nn, err = r.Read(b[n:]) + n += nn + } + if len(b) == n && err == io.EOF { + err = nil + } + return n, err +} + +// discard skips n bytes in r, reporting an error if unable to do so. +func discard(tr *Reader, n int64) error { + var seekSkipped, copySkipped int64 + var err error + r := tr.r + if tr.RawAccounting { + + copySkipped, err = io.CopyN(tr.rawBytes, tr.r, n) + goto out + } + + // If possible, Seek to the last byte before the end of the data section. + // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + if sr, ok := r.(io.Seeker); ok && n > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, io.SeekCurrent) + if pos1 >= 0 && err == nil { + // Seek seems supported, so perform the real Seek. + pos2, err := sr.Seek(n-1, io.SeekCurrent) + if pos2 < 0 || err != nil { + return err + } + seekSkipped = pos2 - pos1 + } + } + + copySkipped, err = io.CopyN(ioutil.Discard, r, n-seekSkipped) +out: + if err == io.EOF && seekSkipped+copySkipped < n { + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go new file mode 100644 index 00000000..cf9cc79c --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go new file mode 100644 index 00000000..6f17dbe3 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go new file mode 100644 index 00000000..868105f3 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go @@ -0,0 +1,96 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "os/user" + "runtime" + "strconv" + "sync" + "syscall" +) + +func init() { + sysStat = statUnix +} + +// userMap and groupMap caches UID and GID lookups for performance reasons. +// The downside is that renaming uname or gname by the OS never takes effect. +var userMap, groupMap sync.Map // map[int]string + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + + // Best effort at populating Uname and Gname. + // The os/user functions may fail for any number of reasons + // (not implemented on that platform, cgo not enabled, etc). + if u, ok := userMap.Load(h.Uid); ok { + h.Uname = u.(string) + } else if u, err := user.LookupId(strconv.Itoa(h.Uid)); err == nil { + h.Uname = u.Username + userMap.Store(h.Uid, h.Uname) + } + if g, ok := groupMap.Load(h.Gid); ok { + h.Gname = g.(string) + } else if g, err := user.LookupGroupId(strconv.Itoa(h.Gid)); err == nil { + h.Gname = g.Name + groupMap.Store(h.Gid, h.Gname) + } + + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + + // Best effort at populating Devmajor and Devminor. + if h.Typeflag == TypeChar || h.Typeflag == TypeBlock { + dev := uint64(sys.Rdev) // May be int32 or uint32 + switch runtime.GOOS { + case "linux": + // Copied from golang.org/x/sys/unix/dev_linux.go. + major := uint32((dev & 0x00000000000fff00) >> 8) + major |= uint32((dev & 0xfffff00000000000) >> 32) + minor := uint32((dev & 0x00000000000000ff) >> 0) + minor |= uint32((dev & 0x00000ffffff00000) >> 12) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "darwin": + // Copied from golang.org/x/sys/unix/dev_darwin.go. + major := uint32((dev >> 24) & 0xff) + minor := uint32(dev & 0xffffff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "dragonfly": + // Copied from golang.org/x/sys/unix/dev_dragonfly.go. + major := uint32((dev >> 8) & 0xff) + minor := uint32(dev & 0xffff00ff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "freebsd": + // Copied from golang.org/x/sys/unix/dev_freebsd.go. + major := uint32((dev >> 8) & 0xff) + minor := uint32(dev & 0xffff00ff) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "netbsd": + // Copied from golang.org/x/sys/unix/dev_netbsd.go. + major := uint32((dev & 0x000fff00) >> 8) + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xfff00000) >> 12) + h.Devmajor, h.Devminor = int64(major), int64(minor) + case "openbsd": + // Copied from golang.org/x/sys/unix/dev_openbsd.go. + major := uint32((dev & 0x0000ff00) >> 8) + minor := uint32((dev & 0x000000ff) >> 0) + minor |= uint32((dev & 0xffff0000) >> 8) + h.Devmajor, h.Devminor = int64(major), int64(minor) + default: + // TODO: Implement solaris (see https://golang.org/issue/8106) + } + } + return nil +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go b/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go new file mode 100644 index 00000000..d144485a --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/strconv.go @@ -0,0 +1,326 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "time" +) + +// hasNUL reports whether the NUL character exists within s. 
+func hasNUL(s string) bool {
+	return strings.IndexByte(s, 0) >= 0
+}
+
+// isASCII reports whether the input is an ASCII C-style string.
+func isASCII(s string) bool {
+	for _, c := range s {
+		if c >= 0x80 || c == 0x00 {
+			return false
+		}
+	}
+	return true
+}
+
+// toASCII converts the input to an ASCII C-style string.
+// This is a best effort conversion, so invalid characters are dropped.
+func toASCII(s string) string {
+	if isASCII(s) {
+		return s
+	}
+	b := make([]byte, 0, len(s))
+	for _, c := range s {
+		if c < 0x80 && c != 0x00 {
+			b = append(b, byte(c))
+		}
+	}
+	return string(b)
+}
+
+type parser struct {
+	err error // Last error seen
+}
+
+type formatter struct {
+	err error // Last error seen
+}
+
+// parseString parses bytes as a NUL-terminated C-style string.
+// If a NUL byte is not found then the whole slice is returned as a string.
+func (*parser) parseString(b []byte) string {
+	if i := bytes.IndexByte(b, 0); i >= 0 {
+		return string(b[:i])
+	}
+	return string(b)
+}
+
+// formatString copies s into b, NUL-terminating if possible.
+func (f *formatter) formatString(b []byte, s string) {
+	if len(s) > len(b) {
+		f.err = ErrFieldTooLong
+	}
+	copy(b, s)
+	if len(s) < len(b) {
+		b[len(s)] = 0
+	}
+
+	// Some buggy readers treat regular files with a trailing slash
+	// in the V7 path field as a directory even though the full path
+	// recorded elsewhere (e.g., via PAX record) contains no trailing slash.
+	if len(s) > len(b) && b[len(b)-1] == '/' {
+		n := len(strings.TrimRight(s[:len(b)], "/"))
+		b[n] = 0 // Replace trailing slash with NUL terminator
+	}
+}
+
+// fitsInBase256 reports whether x can be encoded into n bytes using base-256
+// encoding. Unlike octal encoding, base-256 encoding does not require that the
+// string ends with a NUL character. Thus, all n bytes are available for output.
+//
+// If operating in binary mode, this assumes strict GNU binary mode; which means
+// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
+// equivalent to the sign bit in two's complement form.
+func fitsInBase256(n int, x int64) bool {
+	binBits := uint(n-1) * 8
+	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
+}
+
+// parseNumeric parses the input as being encoded in either base-256 or octal.
+// This function may return negative numbers.
+// If parsing fails or an integer overflow occurs, err will be set.
+func (p *parser) parseNumeric(b []byte) int64 {
+	// Check for base-256 (binary) format first.
+	// If the first bit is set, then all following bits constitute a two's
+	// complement encoded number in big-endian byte order.
+	if len(b) > 0 && b[0]&0x80 != 0 {
+		// Handling negative numbers relies on the following identity:
+		//	-a-1 == ^a
+		//
+		// If the number is negative, we use an inversion mask to invert the
+		// data bytes and treat the value as an unsigned number.
+		var inv byte // 0x00 if positive or zero, 0xff if negative
+		if b[0]&0x40 != 0 {
+			inv = 0xff
+		}
+
+		var x uint64
+		for i, c := range b {
+			c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
+			if i == 0 {
+				c &= 0x7f // Ignore signal bit in first byte
+			}
+			if (x >> 56) > 0 {
+				p.err = ErrHeader // Integer overflow
+				return 0
+			}
+			x = x<<8 | uint64(c)
+		}
+		if (x >> 63) > 0 {
+			p.err = ErrHeader // Integer overflow
+			return 0
+		}
+		if inv == 0xff {
+			return ^int64(x)
+		}
+		return int64(x)
+	}
+
+	// Normal case is base-8 (octal) format.
+	return p.parseOctal(b)
+}
+
+// formatNumeric encodes x into b using base-8 (octal) encoding if possible.
+// Otherwise it will attempt to use base-256 (binary) encoding.
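+// (E.g., in a 12-byte field only 11 octal digits fit, so x = 1<<33 falls
+// through to base-256, which sets the high bit of the leading byte.)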
+func (f *formatter) formatNumeric(b []byte, x int64) {
+	if fitsInOctal(len(b), x) {
+		f.formatOctal(b, x)
+		return
+	}
+
+	if fitsInBase256(len(b), x) {
+		for i := len(b) - 1; i >= 0; i-- {
+			b[i] = byte(x)
+			x >>= 8
+		}
+		b[0] |= 0x80 // Highest bit indicates binary format
+		return
+	}
+
+	f.formatOctal(b, 0) // Last resort, just write zero
+	f.err = ErrFieldTooLong
+}
+
+func (p *parser) parseOctal(b []byte) int64 {
+	// Because unused fields are filled with NULs, we need
+	// to skip leading NULs. Fields may also be padded with
+	// spaces or NULs.
+	// So we remove leading and trailing NULs and spaces to
+	// be sure.
+	b = bytes.Trim(b, " \x00")
+
+	if len(b) == 0 {
+		return 0
+	}
+	x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
+	if perr != nil {
+		p.err = ErrHeader
+	}
+	return int64(x)
+}
+
+func (f *formatter) formatOctal(b []byte, x int64) {
+	if !fitsInOctal(len(b), x) {
+		x = 0 // Last resort, just write zero
+		f.err = ErrFieldTooLong
+	}
+
+	s := strconv.FormatInt(x, 8)
+	// Add leading zeros, but leave room for a NUL.
+	if n := len(b) - len(s) - 1; n > 0 {
+		s = strings.Repeat("0", n) + s
+	}
+	f.formatString(b, s)
+}
+
+// fitsInOctal reports whether the integer x fits in a field n-bytes long
+// using octal encoding with the appropriate NUL terminator.
+func fitsInOctal(n int, x int64) bool {
+	octBits := uint(n-1) * 3
+	return x >= 0 && (n >= 22 || x < 1<<octBits)
+}
+
+// parsePAXTime takes a string of the form %d.%d as described in the PAX
+// specification. Note that this implementation allows for negative timestamps,
+// which is allowed for by the PAX specification, but not always portable.
+func parsePAXTime(s string) (time.Time, error) {
+	const maxNanoSecondDigits = 9
+
+	// Split string into seconds and sub-seconds parts.
+	ss, sn := s, ""
+	if pos := strings.IndexByte(s, '.'); pos >= 0 {
+		ss, sn = s[:pos], s[pos+1:]
+	}
+
+	// Parse the seconds.
+	secs, err := strconv.ParseInt(ss, 10, 64)
+	if err != nil {
+		return time.Time{}, ErrHeader
+	}
+	if len(sn) == 0 {
+		return time.Unix(secs, 0), nil // No sub-second values
+	}
+
+	// Parse the nanoseconds.
+	if strings.Trim(sn, "0123456789") != "" {
+		return time.Time{}, ErrHeader
+	}
+	if len(sn) < maxNanoSecondDigits {
+		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
+	} else {
+		sn = sn[:maxNanoSecondDigits] // Right truncate
+	}
+	nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
+	if len(ss) > 0 && ss[0] == '-' {
+		return time.Unix(secs, -1*nsecs), nil // Negative correction
+	}
+	return time.Unix(secs, nsecs), nil
+}
+
+// formatPAXTime converts ts into a time of the form %d.%d as described in the
+// PAX specification. This function is capable of negative timestamps.
+func formatPAXTime(ts time.Time) (s string) {
+	secs, nsecs := ts.Unix(), ts.Nanosecond()
+	if nsecs == 0 {
+		return strconv.FormatInt(secs, 10)
+	}
+
+	// If seconds is negative, then perform correction.
+	sign := ""
+	if secs < 0 {
+		sign = "-"             // Remember sign
+		secs = -(secs + 1)     // Add a second to secs
+		nsecs = -(nsecs - 1E9) // Take that second away from nsecs
+	}
+	return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
+}
+
+// parsePAXRecord parses the input PAX record string into a key-value pair.
+// If parsing is successful, it will slice off the currently read record and
+// return the remainder as r.
+func parsePAXRecord(s string) (k, v, r string, err error) {
+	// The size field ends at the first space.
+	sp := strings.IndexByte(s, ' ')
+	if sp == -1 {
+		return "", "", s, ErrHeader
+	}
+
+	// Parse the first token as a decimal integer.
+	n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
+	if perr != nil || n < 5 || int64(len(s)) < n {
+		return "", "", s, ErrHeader
+	}
+
+	// Extract everything between the space and the final newline.
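+	// (Worked example, not from any archive: for the input
+	// "30 mtime=1350244992.023960108\n", n is 30, rec becomes
+	// "mtime=1350244992.023960108", nl is "\n", and rem is empty.)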
+ rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] + if nl != "\n" { + return "", "", s, ErrHeader + } + + // The first equals separates the key from the value. + eq := strings.IndexByte(rec, '=') + if eq == -1 { + return "", "", s, ErrHeader + } + k, v = rec[:eq], rec[eq+1:] + + if !validPAXRecord(k, v) { + return "", "", s, ErrHeader + } + return k, v, rem, nil +} + +// formatPAXRecord formats a single PAX record, prefixing it with the +// appropriate length. +func formatPAXRecord(k, v string) (string, error) { + if !validPAXRecord(k, v) { + return "", ErrHeader + } + + const padding = 3 // Extra padding for ' ', '=', and '\n' + size := len(k) + len(v) + padding + size += len(strconv.Itoa(size)) + record := strconv.Itoa(size) + " " + k + "=" + v + "\n" + + // Final adjustment if adding size field increased the record size. + if len(record) != size { + size = len(record) + record = strconv.Itoa(size) + " " + k + "=" + v + "\n" + } + return record, nil +} + +// validPAXRecord reports whether the key-value pair is valid where each +// record is formatted as: +// "%d %s=%s\n" % (size, key, value) +// +// Keys and values should be UTF-8, but the number of bad writers out there +// forces us to be a more liberal. +// Thus, we only reject all keys with NUL, and only reject NULs in values +// for the PAX version of the USTAR string fields. +// The key must not contain an '=' character. +func validPAXRecord(k, v string) bool { + if k == "" || strings.IndexByte(k, '=') >= 0 { + return false + } + switch k { + case paxPath, paxLinkpath, paxUname, paxGname: + return !hasNUL(v) + default: + return !hasNUL(k) + } +} diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go new file mode 100644 index 00000000..e80498d0 --- /dev/null +++ b/vendor/github.com/vbatts/tar-split/archive/tar/writer.go @@ -0,0 +1,653 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "fmt" + "io" + "path" + "sort" + "strings" + "time" +) + +// Writer provides sequential writing of a tar archive. +// Write.WriteHeader begins a new file with the provided Header, +// and then Writer can be treated as an io.Writer to supply that file's data. +type Writer struct { + w io.Writer + pad int64 // Amount of padding to write after current file entry + curr fileWriter // Writer for current file entry + hdr Header // Shallow copy of Header that is safe for mutations + blk block // Buffer to use as temporary local storage + + // err is a persistent error. + // It is only the responsibility of every exported method of Writer to + // ensure that this error is sticky. + err error +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { + return &Writer{w: w, curr: ®FileWriter{w, 0}} +} + +type fileWriter interface { + io.Writer + fileState + + ReadFrom(io.Reader) (int64, error) +} + +// Flush finishes writing the current file's block padding. +// The current file must be fully written before Flush can be called. +// +// This is unnecessary as the next call to WriteHeader or Close +// will implicitly flush out the file's padding. 
+func (tw *Writer) Flush() error { + if tw.err != nil { + return tw.err + } + if nb := tw.curr.LogicalRemaining(); nb > 0 { + return fmt.Errorf("archive/tar: missed writing %d bytes", nb) + } + if _, tw.err = tw.w.Write(zeroBlock[:tw.pad]); tw.err != nil { + return tw.err + } + tw.pad = 0 + return nil +} + +// WriteHeader writes hdr and prepares to accept the file's contents. +// The Header.Size determines how many bytes can be written for the next file. +// If the current file is not fully written, then this returns an error. +// This implicitly flushes any padding necessary before writing the header. +func (tw *Writer) WriteHeader(hdr *Header) error { + if err := tw.Flush(); err != nil { + return err + } + tw.hdr = *hdr // Shallow copy of Header + + // Avoid usage of the legacy TypeRegA flag, and automatically promote + // it to use TypeReg or TypeDir. + if tw.hdr.Typeflag == TypeRegA { + if strings.HasSuffix(tw.hdr.Name, "/") { + tw.hdr.Typeflag = TypeDir + } else { + tw.hdr.Typeflag = TypeReg + } + } + + // Round ModTime and ignore AccessTime and ChangeTime unless + // the format is explicitly chosen. + // This ensures nominal usage of WriteHeader (without specifying the format) + // does not always result in the PAX format being chosen, which + // causes a 1KiB increase to every header. + if tw.hdr.Format == FormatUnknown { + tw.hdr.ModTime = tw.hdr.ModTime.Round(time.Second) + tw.hdr.AccessTime = time.Time{} + tw.hdr.ChangeTime = time.Time{} + } + + allowedFormats, paxHdrs, err := tw.hdr.allowedFormats() + switch { + case allowedFormats.has(FormatUSTAR): + tw.err = tw.writeUSTARHeader(&tw.hdr) + return tw.err + case allowedFormats.has(FormatPAX): + tw.err = tw.writePAXHeader(&tw.hdr, paxHdrs) + return tw.err + case allowedFormats.has(FormatGNU): + tw.err = tw.writeGNUHeader(&tw.hdr) + return tw.err + default: + return err // Non-fatal error + } +} + +func (tw *Writer) writeUSTARHeader(hdr *Header) error { + // Check if we can use USTAR prefix/suffix splitting. + var namePrefix string + if prefix, suffix, ok := splitUSTARPath(hdr.Name); ok { + namePrefix, hdr.Name = prefix, suffix + } + + // Pack the main header. + var f formatter + blk := tw.templateV7Plus(hdr, f.formatString, f.formatOctal) + f.formatString(blk.USTAR().Prefix(), namePrefix) + blk.SetFormat(FormatUSTAR) + if f.err != nil { + return f.err // Should never happen since header is validated + } + return tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag) +} + +func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error { + realName, realSize := hdr.Name, hdr.Size + + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + // Handle sparse files. + var spd sparseDatas + var spb []byte + if len(hdr.SparseHoles) > 0 { + sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map + sph = alignSparseEntries(sph, hdr.Size) + spd = invertSparseEntries(sph, hdr.Size) + + // Format the sparse map. + hdr.Size = 0 // Replace with encoded size + spb = append(strconv.AppendInt(spb, int64(len(spd)), 10), '\n') + for _, s := range spd { + hdr.Size += s.Length + spb = append(strconv.AppendInt(spb, s.Offset, 10), '\n') + spb = append(strconv.AppendInt(spb, s.Length, 10), '\n') + } + pad := blockPadding(int64(len(spb))) + spb = append(spb, zeroBlock[:pad]...) + hdr.Size += int64(len(spb)) // Accounts for encoded sparse map + + // Add and modify appropriate PAX records. 
+ dir, file := path.Split(realName) + hdr.Name = path.Join(dir, "GNUSparseFile.0", file) + paxHdrs[paxGNUSparseMajor] = "1" + paxHdrs[paxGNUSparseMinor] = "0" + paxHdrs[paxGNUSparseName] = realName + paxHdrs[paxGNUSparseRealSize] = strconv.FormatInt(realSize, 10) + paxHdrs[paxSize] = strconv.FormatInt(hdr.Size, 10) + delete(paxHdrs, paxPath) // Recorded by paxGNUSparseName + } + */ + _ = realSize + + // Write PAX records to the output. + isGlobal := hdr.Typeflag == TypeXGlobalHeader + if len(paxHdrs) > 0 || isGlobal { + // Sort keys for deterministic ordering. + var keys []string + for k := range paxHdrs { + keys = append(keys, k) + } + sort.Strings(keys) + + // Write each record to a buffer. + var buf strings.Builder + for _, k := range keys { + rec, err := formatPAXRecord(k, paxHdrs[k]) + if err != nil { + return err + } + buf.WriteString(rec) + } + + // Write the extended header file. + var name string + var flag byte + if isGlobal { + name = realName + if name == "" { + name = "GlobalHead.0.0" + } + flag = TypeXGlobalHeader + } else { + dir, file := path.Split(realName) + name = path.Join(dir, "PaxHeaders.0", file) + flag = TypeXHeader + } + data := buf.String() + if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal { + return err // Global headers return here + } + } + + // Pack the main header. + var f formatter // Ignore errors since they are expected + fmtStr := func(b []byte, s string) { f.formatString(b, toASCII(s)) } + blk := tw.templateV7Plus(hdr, fmtStr, f.formatOctal) + blk.SetFormat(FormatPAX) + if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { + return err + } + + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + // Write the sparse map and setup the sparse writer if necessary. + if len(spd) > 0 { + // Use tw.curr since the sparse map is accounted for in hdr.Size. + if _, err := tw.curr.Write(spb); err != nil { + return err + } + tw.curr = &sparseFileWriter{tw.curr, spd, 0} + } + */ + return nil +} + +func (tw *Writer) writeGNUHeader(hdr *Header) error { + // Use long-link files if Name or Linkname exceeds the field size. + const longName = "././@LongLink" + if len(hdr.Name) > nameSize { + data := hdr.Name + "\x00" + if err := tw.writeRawFile(longName, data, TypeGNULongName, FormatGNU); err != nil { + return err + } + } + if len(hdr.Linkname) > nameSize { + data := hdr.Linkname + "\x00" + if err := tw.writeRawFile(longName, data, TypeGNULongLink, FormatGNU); err != nil { + return err + } + } + + // Pack the main header. + var f formatter // Ignore errors since they are expected + var spd sparseDatas + var spb []byte + blk := tw.templateV7Plus(hdr, f.formatString, f.formatNumeric) + if !hdr.AccessTime.IsZero() { + f.formatNumeric(blk.GNU().AccessTime(), hdr.AccessTime.Unix()) + } + if !hdr.ChangeTime.IsZero() { + f.formatNumeric(blk.GNU().ChangeTime(), hdr.ChangeTime.Unix()) + } + // TODO(dsnet): Re-enable this when adding sparse support. + // See https://golang.org/issue/22735 + /* + if hdr.Typeflag == TypeGNUSparse { + sph := append([]sparseEntry{}, hdr.SparseHoles...) // Copy sparse map + sph = alignSparseEntries(sph, hdr.Size) + spd = invertSparseEntries(sph, hdr.Size) + + // Format the sparse map. 
+ formatSPD := func(sp sparseDatas, sa sparseArray) sparseDatas { + for i := 0; len(sp) > 0 && i < sa.MaxEntries(); i++ { + f.formatNumeric(sa.Entry(i).Offset(), sp[0].Offset) + f.formatNumeric(sa.Entry(i).Length(), sp[0].Length) + sp = sp[1:] + } + if len(sp) > 0 { + sa.IsExtended()[0] = 1 + } + return sp + } + sp2 := formatSPD(spd, blk.GNU().Sparse()) + for len(sp2) > 0 { + var spHdr block + sp2 = formatSPD(sp2, spHdr.Sparse()) + spb = append(spb, spHdr[:]...) + } + + // Update size fields in the header block. + realSize := hdr.Size + hdr.Size = 0 // Encoded size; does not account for encoded sparse map + for _, s := range spd { + hdr.Size += s.Length + } + copy(blk.V7().Size(), zeroBlock[:]) // Reset field + f.formatNumeric(blk.V7().Size(), hdr.Size) + f.formatNumeric(blk.GNU().RealSize(), realSize) + } + */ + blk.SetFormat(FormatGNU) + if err := tw.writeRawHeader(blk, hdr.Size, hdr.Typeflag); err != nil { + return err + } + + // Write the extended sparse map and setup the sparse writer if necessary. + if len(spd) > 0 { + // Use tw.w since the sparse map is not accounted for in hdr.Size. + if _, err := tw.w.Write(spb); err != nil { + return err + } + tw.curr = &sparseFileWriter{tw.curr, spd, 0} + } + return nil +} + +type ( + stringFormatter func([]byte, string) + numberFormatter func([]byte, int64) +) + +// templateV7Plus fills out the V7 fields of a block using values from hdr. +// It also fills out fields (uname, gname, devmajor, devminor) that are +// shared in the USTAR, PAX, and GNU formats using the provided formatters. +// +// The block returned is only valid until the next call to +// templateV7Plus or writeRawFile. +func (tw *Writer) templateV7Plus(hdr *Header, fmtStr stringFormatter, fmtNum numberFormatter) *block { + tw.blk.Reset() + + modTime := hdr.ModTime + if modTime.IsZero() { + modTime = time.Unix(0, 0) + } + + v7 := tw.blk.V7() + v7.TypeFlag()[0] = hdr.Typeflag + fmtStr(v7.Name(), hdr.Name) + fmtStr(v7.LinkName(), hdr.Linkname) + fmtNum(v7.Mode(), hdr.Mode) + fmtNum(v7.UID(), int64(hdr.Uid)) + fmtNum(v7.GID(), int64(hdr.Gid)) + fmtNum(v7.Size(), hdr.Size) + fmtNum(v7.ModTime(), modTime.Unix()) + + ustar := tw.blk.USTAR() + fmtStr(ustar.UserName(), hdr.Uname) + fmtStr(ustar.GroupName(), hdr.Gname) + fmtNum(ustar.DevMajor(), hdr.Devmajor) + fmtNum(ustar.DevMinor(), hdr.Devminor) + + return &tw.blk +} + +// writeRawFile writes a minimal file with the given name and flag type. +// It uses format to encode the header format and will write data as the body. +// It uses default values for all of the other fields (as BSD and GNU tar does). +func (tw *Writer) writeRawFile(name, data string, flag byte, format Format) error { + tw.blk.Reset() + + // Best effort for the filename. + name = toASCII(name) + if len(name) > nameSize { + name = name[:nameSize] + } + name = strings.TrimRight(name, "/") + + var f formatter + v7 := tw.blk.V7() + v7.TypeFlag()[0] = flag + f.formatString(v7.Name(), name) + f.formatOctal(v7.Mode(), 0) + f.formatOctal(v7.UID(), 0) + f.formatOctal(v7.GID(), 0) + f.formatOctal(v7.Size(), int64(len(data))) // Must be < 8GiB + f.formatOctal(v7.ModTime(), 0) + tw.blk.SetFormat(format) + if f.err != nil { + return f.err // Only occurs if size condition is violated + } + + // Write the header and data. + if err := tw.writeRawHeader(&tw.blk, int64(len(data)), flag); err != nil { + return err + } + _, err := io.WriteString(tw, data) + return err +} + +// writeRawHeader writes the value of blk, regardless of its value. 
+// It sets up the Writer such that it can accept a file of the given size. +// If the flag is a special header-only flag, then the size is treated as zero. +func (tw *Writer) writeRawHeader(blk *block, size int64, flag byte) error { + if err := tw.Flush(); err != nil { + return err + } + if _, err := tw.w.Write(blk[:]); err != nil { + return err + } + if isHeaderOnlyType(flag) { + size = 0 + } + tw.curr = ®FileWriter{tw.w, size} + tw.pad = blockPadding(size) + return nil +} + +// splitUSTARPath splits a path according to USTAR prefix and suffix rules. +// If the path is not splittable, then it will return ("", "", false). +func splitUSTARPath(name string) (prefix, suffix string, ok bool) { + length := len(name) + if length <= nameSize || !isASCII(name) { + return "", "", false + } else if length > prefixSize+1 { + length = prefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + + i := strings.LastIndex(name[:length], "/") + nlen := len(name) - i - 1 // nlen is length of suffix + plen := i // plen is length of prefix + if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize { + return "", "", false + } + return name[:i], name[i+1:], true +} + +// Write writes to the current file in the tar archive. +// Write returns the error ErrWriteTooLong if more than +// Header.Size bytes are written after WriteHeader. +// +// Calling Write on special types like TypeLink, TypeSymlink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns (0, ErrWriteTooLong) regardless +// of what the Header.Size claims. +func (tw *Writer) Write(b []byte) (int, error) { + if tw.err != nil { + return 0, tw.err + } + n, err := tw.curr.Write(b) + if err != nil && err != ErrWriteTooLong { + tw.err = err + } + return n, err +} + +// readFrom populates the content of the current file by reading from r. +// The bytes read must match the number of remaining bytes in the current file. +// +// If the current file is sparse and r is an io.ReadSeeker, +// then readFrom uses Seek to skip past holes defined in Header.SparseHoles, +// assuming that skipped regions are all NULs. +// This always reads the last byte to ensure r is the right size. +// +// TODO(dsnet): Re-export this when adding sparse file support. +// See https://golang.org/issue/22735 +func (tw *Writer) readFrom(r io.Reader) (int64, error) { + if tw.err != nil { + return 0, tw.err + } + n, err := tw.curr.ReadFrom(r) + if err != nil && err != ErrWriteTooLong { + tw.err = err + } + return n, err +} + +// Close closes the tar archive by flushing the padding, and writing the footer. +// If the current file (from a prior call to WriteHeader) is not fully written, +// then this returns an error. +func (tw *Writer) Close() error { + if tw.err == ErrWriteAfterClose { + return nil + } + if tw.err != nil { + return tw.err + } + + // Trailer: two zero blocks. + err := tw.Flush() + for i := 0; i < 2 && err == nil; i++ { + _, err = tw.w.Write(zeroBlock[:]) + } + + // Ensure all future actions are invalid. + tw.err = ErrWriteAfterClose + return err // Report IO errors +} + +// regFileWriter is a fileWriter for writing data to a regular file entry. 
+type regFileWriter struct { + w io.Writer // Underlying Writer + nb int64 // Number of remaining bytes to write +} + +func (fw *regFileWriter) Write(b []byte) (n int, err error) { + overwrite := int64(len(b)) > fw.nb + if overwrite { + b = b[:fw.nb] + } + if len(b) > 0 { + n, err = fw.w.Write(b) + fw.nb -= int64(n) + } + switch { + case err != nil: + return n, err + case overwrite: + return n, ErrWriteTooLong + default: + return n, nil + } +} + +func (fw *regFileWriter) ReadFrom(r io.Reader) (int64, error) { + return io.Copy(struct{ io.Writer }{fw}, r) +} + +func (fw regFileWriter) LogicalRemaining() int64 { + return fw.nb +} +func (fw regFileWriter) PhysicalRemaining() int64 { + return fw.nb +} + +// sparseFileWriter is a fileWriter for writing data to a sparse file entry. +type sparseFileWriter struct { + fw fileWriter // Underlying fileWriter + sp sparseDatas // Normalized list of data fragments + pos int64 // Current position in sparse file +} + +func (sw *sparseFileWriter) Write(b []byte) (n int, err error) { + overwrite := int64(len(b)) > sw.LogicalRemaining() + if overwrite { + b = b[:sw.LogicalRemaining()] + } + + b0 := b + endPos := sw.pos + int64(len(b)) + for endPos > sw.pos && err == nil { + var nf int // Bytes written in fragment + dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() + if sw.pos < dataStart { // In a hole fragment + bf := b[:min(int64(len(b)), dataStart-sw.pos)] + nf, err = zeroWriter{}.Write(bf) + } else { // In a data fragment + bf := b[:min(int64(len(b)), dataEnd-sw.pos)] + nf, err = sw.fw.Write(bf) + } + b = b[nf:] + sw.pos += int64(nf) + if sw.pos >= dataEnd && len(sw.sp) > 1 { + sw.sp = sw.sp[1:] // Ensure last fragment always remains + } + } + + n = len(b0) - len(b) + switch { + case err == ErrWriteTooLong: + return n, errMissData // Not possible; implies bug in validation logic + case err != nil: + return n, err + case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: + return n, errUnrefData // Not possible; implies bug in validation logic + case overwrite: + return n, ErrWriteTooLong + default: + return n, nil + } +} + +func (sw *sparseFileWriter) ReadFrom(r io.Reader) (n int64, err error) { + rs, ok := r.(io.ReadSeeker) + if ok { + if _, err := rs.Seek(0, io.SeekCurrent); err != nil { + ok = false // Not all io.Seeker can really seek + } + } + if !ok { + return io.Copy(struct{ io.Writer }{sw}, r) + } + + var readLastByte bool + pos0 := sw.pos + for sw.LogicalRemaining() > 0 && !readLastByte && err == nil { + var nf int64 // Size of fragment + dataStart, dataEnd := sw.sp[0].Offset, sw.sp[0].endOffset() + if sw.pos < dataStart { // In a hole fragment + nf = dataStart - sw.pos + if sw.PhysicalRemaining() == 0 { + readLastByte = true + nf-- + } + _, err = rs.Seek(nf, io.SeekCurrent) + } else { // In a data fragment + nf = dataEnd - sw.pos + nf, err = io.CopyN(sw.fw, rs, nf) + } + sw.pos += nf + if sw.pos >= dataEnd && len(sw.sp) > 1 { + sw.sp = sw.sp[1:] // Ensure last fragment always remains + } + } + + // If the last fragment is a hole, then seek to 1-byte before EOF, and + // read a single byte to ensure the file is the right size. 
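+	// (E.g., if the sparse map ends in a hole reaching offset 4096, the seek
+	// above stops at offset 4095 and the final byte is read explicitly, so a
+	// truncated source is reported instead of silently accepted.)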
+ if readLastByte && err == nil { + _, err = mustReadFull(rs, []byte{0}) + sw.pos++ + } + + n = sw.pos - pos0 + switch { + case err == io.EOF: + return n, io.ErrUnexpectedEOF + case err == ErrWriteTooLong: + return n, errMissData // Not possible; implies bug in validation logic + case err != nil: + return n, err + case sw.LogicalRemaining() == 0 && sw.PhysicalRemaining() > 0: + return n, errUnrefData // Not possible; implies bug in validation logic + default: + return n, ensureEOF(rs) + } +} + +func (sw sparseFileWriter) LogicalRemaining() int64 { + return sw.sp[len(sw.sp)-1].endOffset() - sw.pos +} +func (sw sparseFileWriter) PhysicalRemaining() int64 { + return sw.fw.PhysicalRemaining() +} + +// zeroWriter may only be written with NULs, otherwise it returns errWriteHole. +type zeroWriter struct{} + +func (zeroWriter) Write(b []byte) (int, error) { + for i, c := range b { + if c != 0 { + return i, errWriteHole + } + } + return len(b), nil +} + +// ensureEOF checks whether r is at EOF, reporting ErrWriteTooLong if not so. +func ensureEOF(r io.Reader) error { + n, err := tryReadFull(r, []byte{0}) + switch { + case n > 0: + return ErrWriteTooLong + case err == io.EOF: + return nil + default: + return err + } +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go index 94c71ac1..5dfacbb9 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.11 && gc && !purego -// +build go1.11,gc,!purego +//go:build gc && !purego +// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index 63cae9e6..f1f66230 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.11 && gc && !purego -// +build go1.11,gc,!purego +//go:build gc && !purego +// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index 025b4989..02ff3d05 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego -// +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego +//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego +// +build !arm64,!s390x,!ppc64le !gc purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go new file mode 100644 index 00000000..93da7322 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305.go @@ -0,0 +1,98 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD and its +// extended nonce variant XChaCha20-Poly1305, as specified in RFC 8439 and +// draft-irtf-cfrg-xchacha-01. 
+package chacha20poly1305 // import "golang.org/x/crypto/chacha20poly1305" + +import ( + "crypto/cipher" + "errors" +) + +const ( + // KeySize is the size of the key used by this AEAD, in bytes. + KeySize = 32 + + // NonceSize is the size of the nonce used with the standard variant of this + // AEAD, in bytes. + // + // Note that this is too short to be safely generated at random if the same + // key is reused more than 2³² times. + NonceSize = 12 + + // NonceSizeX is the size of the nonce used with the XChaCha20-Poly1305 + // variant of this AEAD, in bytes. + NonceSizeX = 24 + + // Overhead is the size of the Poly1305 authentication tag, and the + // difference between a ciphertext length and its plaintext. + Overhead = 16 +) + +type chacha20poly1305 struct { + key [KeySize]byte +} + +// New returns a ChaCha20-Poly1305 AEAD that uses the given 256-bit key. +func New(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(chacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (c *chacha20poly1305) NonceSize() int { + return NonceSize +} + +func (c *chacha20poly1305) Overhead() int { + return Overhead +} + +func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + return c.seal(dst, nonce, plaintext, additionalData) +} + +var errOpen = errors.New("chacha20poly1305: message authentication failed") + +func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSize { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + return c.open(dst, nonce, ciphertext, additionalData) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go new file mode 100644 index 00000000..0c408c57 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.go @@ -0,0 +1,87 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build gc && !purego +// +build gc,!purego + +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/internal/alias" + "golang.org/x/sys/cpu" +) + +//go:noescape +func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool + +//go:noescape +func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte) + +var ( + useAVX2 = cpu.X86.HasAVX2 && cpu.X86.HasBMI2 +) + +// setupState writes a ChaCha20 input matrix to state. 
See +// https://tools.ietf.org/html/rfc7539#section-2.3. +func setupState(state *[16]uint32, key *[32]byte, nonce []byte) { + state[0] = 0x61707865 + state[1] = 0x3320646e + state[2] = 0x79622d32 + state[3] = 0x6b206574 + + state[4] = binary.LittleEndian.Uint32(key[0:4]) + state[5] = binary.LittleEndian.Uint32(key[4:8]) + state[6] = binary.LittleEndian.Uint32(key[8:12]) + state[7] = binary.LittleEndian.Uint32(key[12:16]) + state[8] = binary.LittleEndian.Uint32(key[16:20]) + state[9] = binary.LittleEndian.Uint32(key[20:24]) + state[10] = binary.LittleEndian.Uint32(key[24:28]) + state[11] = binary.LittleEndian.Uint32(key[28:32]) + + state[12] = 0 + state[13] = binary.LittleEndian.Uint32(nonce[0:4]) + state[14] = binary.LittleEndian.Uint32(nonce[4:8]) + state[15] = binary.LittleEndian.Uint32(nonce[8:12]) +} + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + if !cpu.X86.HasSSSE3 { + return c.sealGeneric(dst, nonce, plaintext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ret, out := sliceForAppend(dst, len(plaintext)+16) + if alias.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData) + return ret +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if !cpu.X86.HasSSSE3 { + return c.openGeneric(dst, nonce, ciphertext, additionalData) + } + + var state [16]uint32 + setupState(&state, &c.key, nonce) + + ciphertext = ciphertext[:len(ciphertext)-16] + ret, out := sliceForAppend(dst, len(ciphertext)) + if alias.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s new file mode 100644 index 00000000..867c181a --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -0,0 +1,2696 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare. 
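+//
+// The code below interleaves the ChaCha20 block function with the Poly1305
+// tag computation: there is an SSE path and an AVX2 path, selected at run
+// time via ·useAVX2(SB), each with special-cased routines for very short
+// buffers and for each possible tail length.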
+ +//go:build gc && !purego +// +build gc,!purego + +#include "textflag.h" +// General register allocation +#define oup DI +#define inp SI +#define inl BX +#define adp CX // free to reuse, after we hash the additional data +#define keyp R8 // free to reuse, when we copy the key to stack +#define itr2 R9 // general iterator +#define itr1 CX // general iterator +#define acc0 R10 +#define acc1 R11 +#define acc2 R12 +#define t0 R13 +#define t1 R14 +#define t2 R15 +#define t3 R8 +// Register and stack allocation for the SSE code +#define rStore (0*16)(BP) +#define sStore (1*16)(BP) +#define state1Store (2*16)(BP) +#define state2Store (3*16)(BP) +#define tmpStore (4*16)(BP) +#define ctr0Store (5*16)(BP) +#define ctr1Store (6*16)(BP) +#define ctr2Store (7*16)(BP) +#define ctr3Store (8*16)(BP) +#define A0 X0 +#define A1 X1 +#define A2 X2 +#define B0 X3 +#define B1 X4 +#define B2 X5 +#define C0 X6 +#define C1 X7 +#define C2 X8 +#define D0 X9 +#define D1 X10 +#define D2 X11 +#define T0 X12 +#define T1 X13 +#define T2 X14 +#define T3 X15 +#define A3 T0 +#define B3 T1 +#define C3 T2 +#define D3 T3 +// Register and stack allocation for the AVX2 code +#define rsStoreAVX2 (0*32)(BP) +#define state1StoreAVX2 (1*32)(BP) +#define state2StoreAVX2 (2*32)(BP) +#define ctr0StoreAVX2 (3*32)(BP) +#define ctr1StoreAVX2 (4*32)(BP) +#define ctr2StoreAVX2 (5*32)(BP) +#define ctr3StoreAVX2 (6*32)(BP) +#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack +#define AA0 Y0 +#define AA1 Y5 +#define AA2 Y6 +#define AA3 Y7 +#define BB0 Y14 +#define BB1 Y9 +#define BB2 Y10 +#define BB3 Y11 +#define CC0 Y12 +#define CC1 Y13 +#define CC2 Y8 +#define CC3 Y15 +#define DD0 Y4 +#define DD1 Y1 +#define DD2 Y2 +#define DD3 Y3 +#define TT0 DD3 +#define TT1 AA3 +#define TT2 BB3 +#define TT3 CC3 +// ChaCha20 constants +DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574 +DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865 +DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e +DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32 +DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574 +// <<< 16 with PSHUFB +DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A +DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302 +DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A +// <<< 8 with PSHUFB +DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B +DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003 +DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B + +DATA ·avx2InitMask<>+0x00(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x08(SB)/8, $0x0 +DATA ·avx2InitMask<>+0x10(SB)/8, $0x1 +DATA ·avx2InitMask<>+0x18(SB)/8, $0x0 + +DATA ·avx2IncMask<>+0x00(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x08(SB)/8, $0x0 +DATA ·avx2IncMask<>+0x10(SB)/8, $0x2 +DATA ·avx2IncMask<>+0x18(SB)/8, $0x0 +// Poly1305 key clamp +DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF +DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF + +DATA ·sseIncMask<>+0x00(SB)/8, $0x1 +DATA ·sseIncMask<>+0x08(SB)/8, $0x0 +// To load/store the last < 16 bytes in a buffer +DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff +DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff 
+DATA ·andMask<>+0x28(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff +DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000 +DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff +DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff +DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff +DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff +DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff +DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff +DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff +DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff + +GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32 +GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32 +GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16 +GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32 +GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 +// No PALIGNR in Go ASM yet (but VPALIGNR is present). +#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3 +#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4 +#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5 +#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13 +#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6 +#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7 +#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8 +#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14 +#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9 +#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10 +#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11 +#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15 +#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3 +#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4 +#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5 +#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE 
$0xed; BYTE $0x0c // PALIGNR $12, X13, X13 +#define shiftC0Right shiftC0Left +#define shiftC1Right shiftC1Left +#define shiftC2Right shiftC2Left +#define shiftC3Right shiftC3Left +#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9 +#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 +#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 +#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 +// Some macros +#define chachaQR(A, B, C, D, T) \ + PADDD B, A; PXOR A, D; PSHUFB ·rol16<>(SB), D \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ + PADDD B, A; PXOR A, D; PSHUFB ·rol8<>(SB), D \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B + +#define chachaQR_AVX2(A, B, C, D, T) \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \ + VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D \ + VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B + +#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2 +#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX +#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3 +#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t3, t2; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2 + +#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2 +#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3 +#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3 + +#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage +#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage +// ---------------------------------------------------------------------------- +TEXT polyHashADInternal<>(SB), NOSPLIT, $0 + // adp points to beginning of additional data + // itr2 holds ad length + XORQ acc0, acc0 + XORQ acc1, acc1 + XORQ acc2, acc2 + CMPQ itr2, $13 + JNE hashADLoop + +openFastTLSAD: + // Special treatment for the TLS case of 13 bytes + MOVQ (adp), acc0 + MOVQ 5(adp), acc1 + SHRQ $24, acc1 + MOVQ $1, acc2 + polyMul + RET + +hashADLoop: + // Hash in 16 byte chunks + CMPQ itr2, $16 + JB hashADTail + polyAdd(0(adp)) + LEAQ (1*16)(adp), adp + SUBQ $16, itr2 + polyMul + JMP hashADLoop + +hashADTail: + CMPQ itr2, $0 + JE hashADDone + + // Hash last < 16 byte tail + XORQ t0, t0 + XORQ t1, t1 + XORQ t2, t2 + ADDQ itr2, adp + +hashADTailLoop: + SHLQ $8, t0, t1 + SHLQ $8, t0 + MOVB -1(adp), t2 + XORQ t2, t0 + DECQ adp + DECQ itr2 + JNE hashADTailLoop + +hashADTailFinish: + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Finished AD +hashADDone: + RET + 
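+// For reference, the polyAdd/polyMul macros above implement the standard
+// Poly1305 accumulator update
+//
+//	acc = ((acc + block + 2^128) * r) mod (2^130 - 5)
+//
+// where the 2^128 term is the implicit 1 bit appended to each full 16-byte
+// block (the ADCQ $1, acc2 in polyAdd), the accumulator is held in
+// acc0|acc1|acc2, and r is the clamped first half of the one-time key.
+// polyMulReduceStage folds the product bits above 2^130 back into the low
+// limbs using 2^130 ≡ 5 (mod 2^130 - 5): the high part h is added in once
+// as 4*h and once as h, which is what its two ADDQ/ADCQ chains compute.
+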
+// ---------------------------------------------------------------------------- +// func chacha20Poly1305Open(dst, key, src, ad []byte) bool +TEXT ·chacha20Poly1305Open(SB), 0, $288-97 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + // Check for AVX2 support + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Open_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE openSSE128 // About 16% faster + + // For long buffers, prepare the poly key first + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + MOVO D0, T1 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + MOVO D0, ctr3Store + MOVQ $10, itr2 + +openSSEPreparePolyKey: + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + DECQ itr2 + JNE openSSEPreparePolyKey + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore; MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSEMainLoop: + CMPQ inl, $256 + JB openSSEMainLoopDone + + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + + // There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16 + MOVQ $4, itr1 + MOVQ inp, itr2 + +openSSEInternalLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(itr2)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(itr2), itr2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr1 + JGE openSSEInternalLoop + + polyAdd(0(itr2)) + polyMul + LEAQ (2*8)(itr2), itr2 + + CMPQ itr1, $-6 + JG openSSEInternalLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, 
D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Load - xor - store + MOVO D3, tmpStore + MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup) + MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup) + MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup) + MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup) + MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup) + MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup) + MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup) + MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup) + MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup) + MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup) + MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup) + MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup) + MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup) + MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup) + LEAQ 256(inp), inp + LEAQ 256(oup), oup + SUBQ $256, inl + JMP openSSEMainLoop + +openSSEMainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $64 + JBE openSSETail64 + CMPQ inl, $128 + JBE openSSETail128 + CMPQ inl, $192 + JBE openSSETail192 + JMP openSSETail256 + +openSSEFinalize: + // Hash in the PT, AAD lengths + ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2 + polyMul + + // Final reduce + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally, constant time compare to the tag at the end of the message + XORQ AX, AX + MOVQ $1, DX + XORQ (0*8)(inp), acc0 + XORQ (1*8)(inp), acc1 + ORQ acc1, acc0 + CMOVQEQ DX, AX + + // Return true iff tags are equal + MOVB AX, ret+96(FP) + RET + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 129 bytes +openSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +openSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftB1Right; shiftB2Right + shiftC0Right; shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE openSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore; MOVOU B0, sStore + + // Hash + MOVQ 
ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openSSE128Open: + CMPQ inl, $16 + JB openSSETail16 + SUBQ $16, inl + + // Load for hashing + polyAdd(0(inp)) + + // Load for decryption + MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP openSSE128Open + +openSSETail16: + TESTQ inl, inl + JE openSSEFinalize + + // We can safely load the CT from the end, because it is padded with the MAC + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVOU (inp), T0 + ADDQ inl, inp + PAND -16(t0)(itr2*1), T0 + MOVO T0, 0+tmpStore + MOVQ T0, t0 + MOVQ 8+tmpStore, t1 + PXOR A1, T0 + + // We can only store one byte at a time, since plaintext can be shorter than 16 bytes +openSSETail16Store: + MOVQ T0, t3 + MOVB t3, (oup) + PSRLDQ $1, T0 + INCQ oup + DECQ inl + JNE openSSETail16Store + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + JMP openSSEFinalize + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of ciphertext +openSSETail64: + // Need to decrypt up to 64 bytes - prepare single block + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + XORQ itr2, itr2 + MOVQ inl, itr1 + CMPQ itr1, $16 + JB openSSETail64LoopB + +openSSETail64LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + SUBQ $16, itr1 + +openSSETail64LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0) + shiftB0Left; shiftC0Left; shiftD0Left + chachaQR(A0, B0, C0, D0, T0) + shiftB0Right; shiftC0Right; shiftD0Right + + CMPQ itr1, $16 + JAE openSSETail64LoopA + + CMPQ itr2, $160 + JNE openSSETail64LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0 + +openSSETail64DecLoop: + CMPQ inl, $16 + JB openSSETail64DecLoopDone + SUBQ $16, inl + MOVOU (inp), T0 + PXOR T0, A0 + MOVOU A0, (oup) + LEAQ 16(inp), inp + LEAQ 16(oup), oup + MOVO B0, A0 + MOVO C0, B0 + MOVO D0, C0 + JMP openSSETail64DecLoop + +openSSETail64DecLoopDone: + MOVO A0, A1 + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openSSETail128: + // Need to decrypt up to 128 bytes - prepare two blocks + MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail128LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSETail128LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + CMPQ itr2, itr1 + JB openSSETail128LoopA + + CMPQ itr2, $160 + JNE openSSETail128LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL 
state2Store, C1 + PADDL ctr1Store, D0; PADDL ctr0Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + + SUBQ $64, inl + LEAQ 64(inp), inp + LEAQ 64(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of ciphertext +openSSETail192: + // Need to decrypt up to 192 bytes - prepare three blocks + MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store + MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store + + MOVQ inl, itr1 + MOVQ $160, itr2 + CMPQ itr1, $160 + CMOVQGT itr2, itr1 + ANDQ $-16, itr1 + XORQ itr2, itr2 + +openSSLTail192LoopA: + // Perform ChaCha rounds, while hashing the remaining input + polyAdd(0(inp)(itr2*1)) + polyMul + +openSSLTail192LoopB: + ADDQ $16, itr2 + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; shiftC2Right; shiftD2Right + + CMPQ itr2, itr1 + JB openSSLTail192LoopA + + CMPQ itr2, $160 + JNE openSSLTail192LoopB + + CMPQ inl, $176 + JB openSSLTail192Store + + polyAdd(160(inp)) + polyMul + + CMPQ inl, $192 + JB openSSLTail192Store + + polyAdd(176(inp)) + polyMul + +openSSLTail192Store: + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2 + MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup) + + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + SUBQ $128, inl + LEAQ 128(inp), inp + LEAQ 128(oup), oup + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openSSETail256: + // Need to decrypt up to 256 bytes - prepare four blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + XORQ itr2, itr2 + 
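+	// The loop below runs the 10 ChaCha20 double rounds, hashing 16 bytes
+	// of ciphertext per iteration (itr2 advances by 16 up to $160); any
+	// remaining 16-byte chunks are hashed in openSSETail256HashLoop.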
+openSSETail256Loop: + // This loop interleaves 8 ChaCha quarter rounds with 1 poly multiplication + polyAdd(0(inp)(itr2*1)) + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulStage3 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + ADDQ $2*8, itr2 + CMPQ itr2, $160 + JB openSSETail256Loop + MOVQ inl, itr1 + ANDQ $-16, itr1 + +openSSETail256HashLoop: + polyAdd(0(inp)(itr2*1)) + polyMul + ADDQ $2*8, itr2 + CMPQ itr2, itr1 + JB openSSETail256HashLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup) + LEAQ 192(inp), inp + LEAQ 192(oup), oup + SUBQ $192, inl + MOVO A3, A0 + MOVO B3, B0 + MOVO C3, C0 + MOVO tmpStore, D0 + + JMP openSSETail64DecLoop + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code ---------------------------------------- +chacha20Poly1305Open_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimization, for very short buffers + CMPQ inl, $192 + JBE openAVX2192 + CMPQ inl, $320 + JBE openAVX2320 + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, state2StoreAVX2 + VMOVDQA DD0, ctr3StoreAVX2 + MOVQ $10, itr2 + +openAVX2PreparePolyKey: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0,
CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + DECQ itr2 + JNE openAVX2PreparePolyKey + + VPADDD ·chacha20Constants<>(SB), AA0, AA0 + VPADDD state1StoreAVX2, BB0, BB0 + VPADDD state2StoreAVX2, CC0, CC0 + VPADDD ctr3StoreAVX2, DD0, DD0 + + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for the first 64 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + + // Hash AD + first 64 bytes + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +openAVX2InitialHash64: + polyAdd(0(inp)(itr1*1)) + polyMulAVX2 + ADDQ $16, itr1 + CMPQ itr1, $64 + JNE openAVX2InitialHash64 + + // Decrypt the first 64 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), BB0, BB0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU BB0, (1*32)(oup) + LEAQ (2*32)(inp), inp + LEAQ (2*32)(oup), oup + SUBQ $64, inl + +openAVX2MainLoop: + CMPQ inl, $512 + JB openAVX2MainLoopDone + + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + +openAVX2InternalLoop: + // Let's just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications + // Effectively per 512 bytes of stream we hash 480 bytes of ciphertext + polyAdd(0*8(inp)(itr1*1)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(inp)(itr1*1)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7,
BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(inp)(itr1*1)) + LEAQ (6*8)(itr1), itr1 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + CMPQ itr1, $480 + JNE openAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(480(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR 
(1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(496(inp)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + LEAQ (32*16)(oup), oup + SUBQ $(32*16), inl + JMP openAVX2MainLoop + +openAVX2MainLoopDone: + // Handle the various tail sizes efficiently + TESTQ inl, inl + JE openSSEFinalize + CMPQ inl, $128 + JBE openAVX2Tail128 + CMPQ inl, $256 + JBE openAVX2Tail256 + CMPQ inl, $384 + JBE openAVX2Tail384 + JMP openAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +openAVX2192: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +openAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE openAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +openAVX2ShortOpen: + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + +openAVX2ShortOpenLoop: + CMPQ inl, $32 + JB openAVX2ShortTail32 + SUBQ $32, inl + + // Load for hashing + polyAdd(0*8(inp)) + polyMulAVX2 + polyAdd(2*8(inp)) + polyMulAVX2 + + // Load for 
decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP openAVX2ShortOpenLoop + +openAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2ShortDone + + SUBQ $16, inl + + // Load for hashing + polyAdd(0*8(inp)) + polyMulAVX2 + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2ShortDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +openAVX2320: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +openAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE openAVX2320InnerCipherLoop + + VMOVDQA ·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP openAVX2ShortOpen + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of ciphertext +openAVX2Tail128: + // Need to decrypt up to 128 bytes - prepare two blocks + VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD1 + VMOVDQA DD1, DD0 + + XORQ itr2, itr2 + MOVQ inl, itr1 + ANDQ $-16, itr1 + TESTQ itr1, itr1 + JE openAVX2Tail128LoopB + +openAVX2Tail128LoopA: + // Perform ChaCha rounds, while hashing the 
remaining input + polyAdd(0(inp)(itr2*1)) + polyMulAVX2 + +openAVX2Tail128LoopB: + ADDQ $16, itr2 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail128LoopA + CMPQ itr2, $160 + JNE openAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC1, CC1 + VPADDD DD0, DD1, DD1 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + +openAVX2TailLoop: + CMPQ inl, $32 + JB openAVX2Tail + SUBQ $32, inl + + // Load for decryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + LEAQ (1*32)(oup), oup + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + JMP openAVX2TailLoop + +openAVX2Tail: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB openAVX2TailDone + SUBQ $16, inl + + // Load for decryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +openAVX2TailDone: + VZEROUPPER + JMP openSSETail16 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of ciphertext +openAVX2Tail256: + // Need to decrypt up to 256 bytes - prepare four blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + + // Compute the number of iterations that will hash data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $128, itr1 + SHRQ $4, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + +openAVX2Tail256LoopA: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + INCQ itr2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + CMPQ itr2, itr1 + JB openAVX2Tail256LoopA + + CMPQ itr2, $10 + JNE openAVX2Tail256LoopB + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + + // Hash the remainder of data (if any) +openAVX2Tail256Hash: + ADDQ $16, itr1 + CMPQ itr1, inl + JGT openAVX2Tail256HashEnd + polyAdd (0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail256Hash + +// Store 128 bytes safely, then go to store loop +openAVX2Tail256HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; 
VPERM2I128 $0x13, CC0, DD0, DD2 + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + + VPXOR (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2 + VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup) + LEAQ (4*32)(inp), inp + LEAQ (4*32)(oup), oup + SUBQ $4*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of ciphertext +openAVX2Tail384: + // Need to decrypt up to 384 bytes - prepare six blocks + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, ctr0StoreAVX2 + VMOVDQA DD1, ctr1StoreAVX2 + VMOVDQA DD2, ctr2StoreAVX2 + + // Compute the number of iterations that will hash two blocks of data + MOVQ inl, tmpStoreAVX2 + MOVQ inl, itr1 + SUBQ $256, itr1 + SHRQ $4, itr1 + ADDQ $6, itr1 + MOVQ $10, itr2 + CMPQ itr1, $10 + CMOVQGT itr2, itr1 + MOVQ inp, inl + XORQ itr2, itr2 + + // Perform ChaCha rounds, while hashing the remaining input +openAVX2Tail384LoopB: + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + +openAVX2Tail384LoopA: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + polyAdd(0(inl)) + polyMulAVX2 + LEAQ 16(inl), inl + INCQ itr2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + + CMPQ itr2, itr1 + JB openAVX2Tail384LoopB + + CMPQ itr2, $10 + JNE openAVX2Tail384LoopA + + MOVQ inl, itr2 + SUBQ inp, inl + MOVQ inl, itr1 + MOVQ tmpStoreAVX2, inl + +openAVX2Tail384Hash: + ADDQ $16, itr1 + CMPQ itr1, inl + JGT openAVX2Tail384HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + JMP openAVX2Tail384Hash + +// Store 256 bytes safely, then go to store loop +openAVX2Tail384HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2 + VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, 
(2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + LEAQ (8*32)(inp), inp + LEAQ (8*32)(oup), oup + SUBQ $8*32, inl + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of ciphertext +openAVX2Tail512: + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + XORQ itr1, itr1 + MOVQ inp, itr2 + +openAVX2Tail512LoopB: + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ (2*8)(itr2), itr2 + +openAVX2Tail512LoopA: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(itr2)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; 
VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(itr2)) + polyMulAVX2 + LEAQ (4*8)(itr2), itr2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + INCQ itr1 + CMPQ itr1, $4 + JLT openAVX2Tail512LoopB + + CMPQ itr1, $10 + JNE openAVX2Tail512LoopA + + MOVQ inl, itr1 + SUBQ $384, itr1 + ANDQ $-16, itr1 + +openAVX2Tail512HashLoop: + TESTQ itr1, itr1 + JE openAVX2Tail512HashEnd + polyAdd(0(itr2)) + polyMulAVX2 + LEAQ 16(itr2), itr2 + SUBQ $16, itr1 + JMP openAVX2Tail512HashLoop + +openAVX2Tail512HashEnd: + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + 
VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + LEAQ (12*32)(inp), inp + LEAQ (12*32)(oup), oup + SUBQ $12*32, inl + + JMP openAVX2TailLoop + +// ---------------------------------------------------------------------------- +// ---------------------------------------------------------------------------- +// func chacha20Poly1305Seal(dst, key, src, ad []byte) +TEXT ·chacha20Poly1305Seal(SB), 0, $288-96 + // For aligned stack access + MOVQ SP, BP + ADDQ $32, BP + ANDQ $-32, BP + MOVQ dst+0(FP), oup + MOVQ key+24(FP), keyp + MOVQ src+48(FP), inp + MOVQ src_len+56(FP), inl + MOVQ ad+72(FP), adp + + CMPB ·useAVX2(SB), $1 + JE chacha20Poly1305Seal_AVX2 + + // Special optimization, for very short buffers + CMPQ inl, $128 + JBE sealSSE128 // About 15% faster + + // In the seal case - prepare the poly key + 3 blocks of stream in the first iteration + MOVOU ·chacha20Constants<>(SB), A0 + MOVOU (1*16)(keyp), B0 + MOVOU (2*16)(keyp), C0 + MOVOU (3*16)(keyp), D0 + + // Store state on stack for future use + MOVO B0, state1Store + MOVO C0, state2Store + + // Load state, increment counter blocks + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + MOVQ $10, itr2 + +sealSSEIntroLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JNE sealSSEIntroLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + + // Clamp and store the key + PAND ·polyClampMask<>(SB), A0 + MOVO A0, rStore + MOVO B0, sStore + + // Hash AAD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup) + MOVOU (4*16)(inp), A0; 
MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup) + + MOVQ $128, itr1 + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1 + + CMPQ inl, $64 + JBE sealSSE128SealHash + + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup) + + ADDQ $64, itr1 + SUBQ $64, inl + LEAQ 64(inp), inp + + MOVQ $2, itr1 + MOVQ $8, itr2 + + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + CMPQ inl, $192 + JBE sealSSETail192 + +sealSSEMainLoop: + // Load state, increment counter blocks + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3 + + // Store counters + MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store + +sealSSEInnerLoop: + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyAdd(0(oup)) + shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left + shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left + shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left + polyMulStage1 + polyMulStage2 + LEAQ (2*8)(oup), oup + MOVO C3, tmpStore + chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3) + MOVO tmpStore, C3 + MOVO C1, tmpStore + polyMulStage3 + chachaQR(A3, B3, C3, D3, C1) + MOVO tmpStore, C1 + polyMulReduceStage + shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right + shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right + shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right + DECQ itr2 + JGE sealSSEInnerLoop + polyAdd(0(oup)) + polyMul + LEAQ (2*8)(oup), oup + DECQ itr1 + JG sealSSEInnerLoop + + // Add in the state + PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3 + PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3 + PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3 + PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3 + MOVO D3, tmpStore + + // Load - xor - store + MOVOU (0*16)(inp), D3; PXOR D3, A0 + MOVOU (1*16)(inp), D3; PXOR D3, B0 + MOVOU (2*16)(inp), D3; PXOR D3, C0 + MOVOU (3*16)(inp), D3; PXOR D3, D0 + MOVOU A0, (0*16)(oup) + MOVOU B0, (1*16)(oup) + MOVOU C0, (2*16)(oup) + MOVOU D0, (3*16)(oup) + MOVO tmpStore, D3 + + MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0 + PXOR A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0 + PXOR A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2 + MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); 
MOVOU D2, (11*16)(oup) + ADDQ $192, inp + MOVQ $192, itr1 + SUBQ $192, inl + MOVO A3, A1 + MOVO B3, B1 + MOVO C3, C1 + MOVO D3, D1 + CMPQ inl, $64 + JBE sealSSE128SealHash + MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0 + PXOR A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3 + MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup) + LEAQ 64(inp), inp + SUBQ $64, inl + MOVQ $6, itr1 + MOVQ $4, itr2 + CMPQ inl, $192 + JG sealSSEMainLoop + + MOVQ inl, itr1 + TESTQ inl, inl + JE sealSSE128SealHash + MOVQ $6, itr1 + CMPQ inl, $64 + JBE sealSSETail64 + CMPQ inl, $128 + JBE sealSSETail128 + JMP sealSSETail192 + +// ---------------------------------------------------------------------------- +// Special optimization for the last 64 bytes of plaintext +sealSSETail64: + // Need to encrypt up to 64 bytes - prepare single block, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A1 + MOVO state1Store, B1 + MOVO state2Store, C1 + MOVO ctr3Store, D1 + PADDL ·sseIncMask<>(SB), D1 + MOVO D1, ctr0Store + +sealSSETail64LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail64LoopB: + chachaQR(A1, B1, C1, D1, T1) + shiftB1Left; shiftC1Left; shiftD1Left + chachaQR(A1, B1, C1, D1, T1) + shiftB1Right; shiftC1Right; shiftD1Right + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + DECQ itr1 + JG sealSSETail64LoopA + + DECQ itr2 + JGE sealSSETail64LoopB + PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B1 + PADDL state2Store, C1 + PADDL ctr0Store, D1 + + JMP sealSSE128Seal + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of plaintext +sealSSETail128: + // Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + +sealSSETail128LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail128LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + + DECQ itr1 + JG sealSSETail128LoopA + + DECQ itr2 + JGE sealSSETail128LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1 + PADDL state1Store, B0; PADDL state1Store, B1 + PADDL state2Store, C0; PADDL state2Store, C1 + PADDL ctr0Store, D0; PADDL ctr1Store, D1 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + + MOVQ $64, itr1 + LEAQ 64(inp), inp + SUBQ $64, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 192 bytes of plaintext +sealSSETail192: + // Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes + MOVO ·chacha20Constants<>(SB), 
A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store + +sealSSETail192LoopA: + // Perform ChaCha rounds, while hashing the previously encrypted ciphertext + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealSSETail192LoopB: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftC0Left; shiftD0Left + shiftB1Left; shiftC1Left; shiftD1Left + shiftB2Left; shiftC2Left; shiftD2Left + + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftC0Right; shiftD0Right + shiftB1Right; shiftC1Right; shiftD1Right + shiftB2Right; shiftC2Right; shiftD2Right + + DECQ itr1 + JG sealSSETail192LoopA + + DECQ itr2 + JGE sealSSETail192LoopB + + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2 + PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2 + PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2 + + MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3 + PXOR T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0 + MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup) + MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3 + PXOR T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1 + MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup) + + MOVO A2, A1 + MOVO B2, B1 + MOVO C2, C1 + MOVO D2, D1 + MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + + JMP sealSSE128SealHash + +// ---------------------------------------------------------------------------- +// Special seal optimization for buffers smaller than 129 bytes +sealSSE128: + // For up to 128 bytes of ciphertext and 64 bytes for the poly key, we require to process three blocks + MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0 + MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1 + MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2 + MOVO B0, T1; MOVO C0, T2; MOVO D1, T3 + MOVQ $10, itr2 + +sealSSE128InnerCipherLoop: + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Left; shiftB1Left; shiftB2Left + shiftC0Left; shiftC1Left; shiftC2Left + shiftD0Left; shiftD1Left; shiftD2Left + chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0) + shiftB0Right; shiftB1Right; shiftB2Right + shiftC0Right; shiftC1Right; shiftC2Right + shiftD0Right; shiftD1Right; shiftD2Right + DECQ itr2 + JNE sealSSE128InnerCipherLoop + + // A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded + PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2 + PADDL T1, B0; PADDL T1, B1; PADDL T1, B2 + PADDL T2, C1; PADDL T2, C2 + PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2 + PAND ·polyClampMask<>(SB), A0 + MOVOU A0, rStore + MOVOU B0, sStore + + // Hash + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, 
itr1 + +sealSSE128SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealSSE128Seal + polyAdd(0(oup)) + polyMul + + SUBQ $16, itr1 + ADDQ $16, oup + + JMP sealSSE128SealHash + +sealSSE128Seal: + CMPQ inl, $16 + JB sealSSETail + SUBQ $16, inl + + // Load for encryption + MOVOU (inp), T0 + PXOR T0, A1 + MOVOU A1, (oup) + LEAQ (1*16)(inp), inp + LEAQ (1*16)(oup), oup + + // Extract for hashing + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + // Shift the stream "left" + MOVO B1, A1 + MOVO C1, B1 + MOVO D1, C1 + MOVO A2, D1 + MOVO B2, A2 + MOVO C2, B2 + MOVO D2, C2 + JMP sealSSE128Seal + +sealSSETail: + TESTQ inl, inl + JE sealSSEFinalize + + // We can only load the PT one byte at a time to avoid reading past the end of the buffer + MOVQ inl, itr2 + SHLQ $4, itr2 + LEAQ ·andMask<>(SB), t0 + MOVQ inl, itr1 + LEAQ -1(inp)(inl*1), inp + XORQ t2, t2 + XORQ t3, t3 + XORQ AX, AX + +sealSSETailLoadLoop: + SHLQ $8, t2, t3 + SHLQ $8, t2 + MOVB (inp), AX + XORQ AX, t2 + LEAQ -1(inp), inp + DECQ itr1 + JNE sealSSETailLoadLoop + MOVQ t2, 0+tmpStore + MOVQ t3, 8+tmpStore + PXOR 0+tmpStore, A1 + MOVOU A1, (oup) + MOVOU -16(t0)(itr2*1), T0 + PAND T0, A1 + MOVQ A1, t0 + PSRLDQ $8, A1 + MOVQ A1, t1 + ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2 + polyMul + + ADDQ inl, oup + +sealSSEFinalize: + // Hash in the buffer lengths + ADDQ ad_len+80(FP), acc0 + ADCQ src_len+56(FP), acc1 + ADCQ $1, acc2 + polyMul + + // Final reduce: conditionally subtract 2^130 - 5 if the accumulator is >= 2^130 - 5, in constant time via CMOV + MOVQ acc0, t0 + MOVQ acc1, t1 + MOVQ acc2, t2 + SUBQ $-5, acc0 + SBBQ $-1, acc1 + SBBQ $3, acc2 + CMOVQCS t0, acc0 + CMOVQCS t1, acc1 + CMOVQCS t2, acc2 + + // Add in the "s" part of the key + ADDQ 0+sStore, acc0 + ADCQ 8+sStore, acc1 + + // Finally store the tag at the end of the message + MOVQ acc0, (0*8)(oup) + MOVQ acc1, (1*8)(oup) + RET + +// ---------------------------------------------------------------------------- +// ------------------------- AVX2 Code ---------------------------------------- +chacha20Poly1305Seal_AVX2: + VZEROUPPER + VMOVDQU ·chacha20Constants<>(SB), AA0 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14 + BYTE $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12 + BYTE $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4 + VPADDD ·avx2InitMask<>(SB), DD0, DD0 + + // Special optimizations, for very short buffers + CMPQ inl, $192 + JBE seal192AVX2 // 33% faster + CMPQ inl, $320 + JBE seal320AVX2 // 17% faster + + // For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream + VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2 + VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2 + VPADDD ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2 + VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr2 + +sealAVX2IntroLoop: + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, 
DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr2 + JNE sealAVX2IntroLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + + VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127 + VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key + VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), DD0, DD0 + VMOVDQA DD0, rsStoreAVX2 + + // Hash AD + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + + // Can store at least 320 bytes + VPXOR (0*32)(inp), AA0, AA0 + VPXOR (1*32)(inp), CC0, CC0 + VMOVDQU AA0, (0*32)(oup) + VMOVDQU CC0, (1*32)(oup) + + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0 + VMOVDQU AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup) + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0 + VMOVDQU AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup) + + MOVQ $320, itr1 + SUBQ $320, inl + LEAQ 320(inp), inp + + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0 + CMPQ inl, $128 + JBE sealAVX2SealHash + + VPXOR (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0 + VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup) + SUBQ $128, inl + LEAQ 128(inp), inp + + MOVQ $8, itr1 + MOVQ $2, itr2 + + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + CMPQ inl, $512 + JBE sealAVX2Tail512 + + // We have 448 bytes to hash, but main loop hashes 512 bytes at a time - perform some rounds, before the main loop + VMOVDQA 
·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0 + VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1 + VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2 + VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3 + + VMOVDQA CC3, tmpStoreAVX2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3) + VMOVDQA tmpStoreAVX2, CC3 + VMOVDQA CC1, tmpStoreAVX2 + chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1) + VMOVDQA tmpStoreAVX2, CC1 + + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0 + VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1 + VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2 + VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + + SUBQ $16, oup // Adjust the pointer + MOVQ $9, itr1 + JMP sealAVX2InternalLoopStart + +sealAVX2MainLoop: + // Load state, increment counter blocks, store the incremented counters + VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + MOVQ $10, itr1 + +sealAVX2InternalLoop: + polyAdd(0*8(oup)) + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage1_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, 
DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulStage2_AVX2 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyMulStage3_AVX2 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + +sealAVX2InternalLoopStart: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + polyAdd(2*8(oup)) + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage1_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage2_AVX2 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + polyMulStage3_AVX2 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + polyMulReduceStage + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(4*8(oup)) + LEAQ (6*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulStage1_AVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + polyMulStage2_AVX2 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + polyMulStage3_AVX2 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, 
BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyMulReduceStage + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + DECQ itr1 + JNE sealAVX2InternalLoop + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + + // We only hashed 480 of the 512 bytes available - hash the remaining 32 here + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0 + VPXOR (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0 + VMOVDQU CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + // and here + polyAdd(-2*8(oup)) + polyMulAVX2 + VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + VPXOR (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0 + VMOVDQU AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup) + LEAQ (32*16)(inp), inp + SUBQ $(32*16), inl + CMPQ inl, $512 + JG sealAVX2MainLoop + + // Tail can only hash 480 bytes + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ 32(oup), oup + + MOVQ $10, itr1 + MOVQ $0, itr2 + CMPQ inl, $128 + JBE sealAVX2Tail128 + CMPQ inl, $256 + JBE sealAVX2Tail256 + CMPQ inl, $384 + JBE sealAVX2Tail384 + JMP sealAVX2Tail512 + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 193 bytes +seal192AVX2: + // For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks + VMOVDQA AA0, AA1 + VMOVDQA BB0, BB1 + VMOVDQA CC0, CC1 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2 + VMOVDQA BB0, BB2 + VMOVDQA CC0, CC2 + 
VMOVDQA DD0, DD2 + VMOVDQA DD1, TT3 + MOVQ $10, itr2 + +sealAVX2192InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr2 + JNE sealAVX2192InnerCipherLoop + VPADDD AA2, AA0, AA0; VPADDD AA2, AA1, AA1 + VPADDD BB2, BB0, BB0; VPADDD BB2, BB1, BB1 + VPADDD CC2, CC0, CC0; VPADDD CC2, CC1, CC1 + VPADDD DD2, DD0, DD0; VPADDD TT3, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + + // Clamp and store poly key + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 192 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + +sealAVX2ShortSeal: + // Hash aad + MOVQ ad_len+80(FP), itr2 + CALL polyHashADInternal<>(SB) + XORQ itr1, itr1 + +sealAVX2SealHash: + // itr1 holds the number of bytes encrypted but not yet hashed + CMPQ itr1, $16 + JB sealAVX2ShortSealLoop + polyAdd(0(oup)) + polyMul + SUBQ $16, itr1 + ADDQ $16, oup + JMP sealAVX2SealHash + +sealAVX2ShortSealLoop: + CMPQ inl, $32 + JB sealAVX2ShortTail32 + SUBQ $32, inl + + // Load for encryption + VPXOR (inp), AA0, AA0 + VMOVDQU AA0, (oup) + LEAQ (1*32)(inp), inp + + // Now can hash + polyAdd(0*8(oup)) + polyMulAVX2 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (1*32)(oup), oup + + // Shift stream left + VMOVDQA BB0, AA0 + VMOVDQA CC0, BB0 + VMOVDQA DD0, CC0 + VMOVDQA AA1, DD0 + VMOVDQA BB1, AA1 + VMOVDQA CC1, BB1 + VMOVDQA DD1, CC1 + VMOVDQA AA2, DD1 + VMOVDQA BB2, AA2 + JMP sealAVX2ShortSealLoop + +sealAVX2ShortTail32: + CMPQ inl, $16 + VMOVDQA A0, A1 + JB sealAVX2ShortDone + + SUBQ $16, inl + + // Load for encryption + VPXOR (inp), A0, T0 + VMOVDQU T0, (oup) + LEAQ (1*16)(inp), inp + + // Hash + polyAdd(0*8(oup)) + polyMulAVX2 + LEAQ (1*16)(oup), oup + VPERM2I128 $0x11, AA0, AA0, AA0 + VMOVDQA A0, A1 + +sealAVX2ShortDone: + VZEROUPPER + JMP sealSSETail + +// ---------------------------------------------------------------------------- +// Special optimization for buffers smaller than 321 bytes +seal320AVX2: + // For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks + VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3 + MOVQ $10, itr2 + +sealAVX2320InnerCipherLoop: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; 
VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr2 + JNE sealAVX2320InnerCipherLoop + + VMOVDQA ·chacha20Constants<>(SB), TT0 + VPADDD TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2 + VPADDD TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2 + VPADDD TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2 + VMOVDQA ·avx2IncMask<>(SB), TT0 + VPADDD TT3, DD0, DD0; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD1, DD1; VPADDD TT0, TT3, TT3 + VPADDD TT3, DD2, DD2 + + // Clamp and store poly key + VPERM2I128 $0x02, AA0, BB0, TT0 + VPAND ·polyClampMask<>(SB), TT0, TT0 + VMOVDQA TT0, rsStoreAVX2 + + // Stream for up to 320 bytes + VPERM2I128 $0x13, AA0, BB0, AA0 + VPERM2I128 $0x13, CC0, DD0, BB0 + VPERM2I128 $0x02, AA1, BB1, CC0 + VPERM2I128 $0x02, CC1, DD1, DD0 + VPERM2I128 $0x13, AA1, BB1, AA1 + VPERM2I128 $0x13, CC1, DD1, BB1 + VPERM2I128 $0x02, AA2, BB2, CC1 + VPERM2I128 $0x02, CC2, DD2, DD1 + VPERM2I128 $0x13, AA2, BB2, AA2 + VPERM2I128 $0x13, CC2, DD2, BB2 + JMP sealAVX2ShortSeal + +// ---------------------------------------------------------------------------- +// Special optimization for the last 128 bytes of plaintext +sealAVX2Tail128: + // Need to encrypt up to 128 bytes - prepare two blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0 + VMOVDQA state1StoreAVX2, BB0 + VMOVDQA state2StoreAVX2, CC0 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VMOVDQA DD0, DD1 + +sealAVX2Tail128LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail128LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $12, DD0, DD0, DD0 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0 + VPALIGNR $8, CC0, CC0, CC0 + VPALIGNR $4, DD0, DD0, DD0 + DECQ itr1 + JG sealAVX2Tail128LoopA + DECQ itr2 + JGE sealAVX2Tail128LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA1 + VPADDD state1StoreAVX2, BB0, BB1 + VPADDD state2StoreAVX2, CC0, CC1 + VPADDD DD1, DD0, DD1 + + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + JMP sealAVX2ShortSealLoop + +// ---------------------------------------------------------------------------- +// Special optimization for the last 256 bytes of plaintext +sealAVX2Tail256: + // Need to encrypt up to 256 bytes - prepare four blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD1 + VMOVDQA DD0, TT1 + VMOVDQA DD1, TT2 + +sealAVX2Tail256LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail256LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1 + VPALIGNR $8, CC0, 
CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1 + DECQ itr1 + JG sealAVX2Tail256LoopA + DECQ itr2 + JGE sealAVX2Tail256LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1 + VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + MOVQ $128, itr1 + LEAQ 128(inp), inp + SUBQ $128, inl + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 $0x13, CC1, DD1, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 384 bytes of plaintext +sealAVX2Tail384: + // Need to encrypt up to 384 bytes - prepare six blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2 + VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3 + +sealAVX2Tail384LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail384LoopB: + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(0(oup)) + polyMul + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2 + chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0) + polyAdd(16(oup)) + polyMul + LEAQ 32(oup), oup + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2 + DECQ itr1 + JG sealAVX2Tail384LoopA + DECQ itr2 + JGE sealAVX2Tail384LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2 + VPADDD TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2 + 
VPERM2I128 $0x02, AA0, BB0, TT0 + VPERM2I128 $0x02, CC0, DD0, TT1 + VPERM2I128 $0x13, AA0, BB0, TT2 + VPERM2I128 $0x13, CC0, DD0, TT3 + VPXOR (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3 + VMOVDQU TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup) + VPERM2I128 $0x02, AA1, BB1, TT0 + VPERM2I128 $0x02, CC1, DD1, TT1 + VPERM2I128 $0x13, AA1, BB1, TT2 + VPERM2I128 $0x13, CC1, DD1, TT3 + VPXOR (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3 + VMOVDQU TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup) + MOVQ $256, itr1 + LEAQ 256(inp), inp + SUBQ $256, inl + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + + JMP sealAVX2SealHash + +// ---------------------------------------------------------------------------- +// Special optimization for the last 512 bytes of plaintext +sealAVX2Tail512: + // Need to encrypt up to 512 bytes - prepare eight blocks + // If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed + // If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed + VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3 + VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3 + VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3 + VMOVDQA ctr3StoreAVX2, DD0 + VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3 + VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2 + +sealAVX2Tail512LoopA: + polyAdd(0(oup)) + polyMul + LEAQ 16(oup), oup + +sealAVX2Tail512LoopB: + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + polyAdd(0*8(oup)) + polyMulAVX2 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, 
BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + polyAdd(2*8(oup)) + polyMulAVX2 + LEAQ (4*8)(oup), oup + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPADDD BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3 + VPXOR AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3 + VPSHUFB ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3 + VPADDD DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3 + VPXOR CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3 + VMOVDQA CC3, tmpStoreAVX2 + VPSLLD $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0 + VPSLLD $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1 + VPSLLD $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2 + VPSLLD $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3 + VMOVDQA tmpStoreAVX2, CC3 + VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3 + VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3 + VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3 + + DECQ itr1 + JG sealAVX2Tail512LoopA + DECQ itr2 + JGE sealAVX2Tail512LoopB + + VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3 + VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3 + VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3 + VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3 + VMOVDQA CC3, tmpStoreAVX2 + VPERM2I128 $0x02, AA0, BB0, CC3 + VPXOR (0*32)(inp), CC3, CC3 + VMOVDQU CC3, (0*32)(oup) + VPERM2I128 $0x02, CC0, DD0, CC3 + VPXOR (1*32)(inp), CC3, CC3 + VMOVDQU CC3, (1*32)(oup) + VPERM2I128 $0x13, AA0, BB0, CC3 + VPXOR (2*32)(inp), CC3, CC3 + VMOVDQU CC3, (2*32)(oup) + VPERM2I128 $0x13, CC0, DD0, CC3 + VPXOR (3*32)(inp), CC3, CC3 + VMOVDQU CC3, (3*32)(oup) + + VPERM2I128 $0x02, AA1, BB1, AA0 + VPERM2I128 $0x02, CC1, DD1, BB0 + VPERM2I128 $0x13, AA1, BB1, CC0 + VPERM2I128 
$0x13, CC1, DD1, DD0 + VPXOR (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0 + VMOVDQU AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup) + + VPERM2I128 $0x02, AA2, BB2, AA0 + VPERM2I128 $0x02, CC2, DD2, BB0 + VPERM2I128 $0x13, AA2, BB2, CC0 + VPERM2I128 $0x13, CC2, DD2, DD0 + VPXOR (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0 + VMOVDQU AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup) + + MOVQ $384, itr1 + LEAQ 384(inp), inp + SUBQ $384, inl + VPERM2I128 $0x02, AA3, BB3, AA0 + VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0 + VPERM2I128 $0x13, AA3, BB3, CC0 + VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0 + + JMP sealAVX2SealHash diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go new file mode 100644 index 00000000..6313898f --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_generic.go @@ -0,0 +1,81 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20poly1305 + +import ( + "encoding/binary" + + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/internal/alias" + "golang.org/x/crypto/internal/poly1305" +) + +func writeWithPadding(p *poly1305.MAC, b []byte) { + p.Write(b) + if rem := len(b) % 16; rem != 0 { + var buf [16]byte + padLen := 16 - rem + p.Write(buf[:padLen]) + } +} + +func writeUint64(p *poly1305.MAC, n int) { + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], uint64(n)) + p.Write(buf[:]) +} + +func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte { + ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize) + ciphertext, tag := out[:len(plaintext)], out[len(plaintext):] + if alias.InexactOverlap(out, plaintext) { + panic("chacha20poly1305: invalid buffer overlap") + } + + var polyKey [32]byte + s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.SetCounter(1) // set the counter to 1, skipping 32 bytes + s.XORKeyStream(ciphertext, plaintext) + + p := poly1305.New(&polyKey) + writeWithPadding(p, additionalData) + writeWithPadding(p, ciphertext) + writeUint64(p, len(additionalData)) + writeUint64(p, len(plaintext)) + p.Sum(tag[:0]) + + return ret +} + +func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + tag := ciphertext[len(ciphertext)-16:] + ciphertext = ciphertext[:len(ciphertext)-16] + + var polyKey [32]byte + s, _ := chacha20.NewUnauthenticatedCipher(c.key[:], nonce) + s.XORKeyStream(polyKey[:], polyKey[:]) + s.SetCounter(1) // set the counter to 1, skipping 32 bytes + + p := poly1305.New(&polyKey) + writeWithPadding(p, additionalData) + writeWithPadding(p, ciphertext) + writeUint64(p, len(additionalData)) + writeUint64(p, len(ciphertext)) + + ret, out := sliceForAppend(dst, len(ciphertext)) + if alias.InexactOverlap(out, ciphertext) { + panic("chacha20poly1305: invalid buffer overlap") + } + if !p.Verify(tag) { + for i := range out { + out[i] = 0 + } + return nil, errOpen + } + + s.XORKeyStream(out, ciphertext) + return ret, nil +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go 
b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go new file mode 100644 index 00000000..f832b33d --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_noasm.go @@ -0,0 +1,16 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || !gc || purego +// +build !amd64 !gc purego + +package chacha20poly1305 + +func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte { + return c.sealGeneric(dst, nonce, plaintext, additionalData) +} + +func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + return c.openGeneric(dst, nonce, ciphertext, additionalData) +} diff --git a/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go new file mode 100644 index 00000000..1cebfe94 --- /dev/null +++ b/vendor/golang.org/x/crypto/chacha20poly1305/xchacha20poly1305.go @@ -0,0 +1,86 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha20poly1305 + +import ( + "crypto/cipher" + "errors" + + "golang.org/x/crypto/chacha20" +) + +type xchacha20poly1305 struct { + key [KeySize]byte +} + +// NewX returns a XChaCha20-Poly1305 AEAD that uses the given 256-bit key. +// +// XChaCha20-Poly1305 is a ChaCha20-Poly1305 variant that takes a longer nonce, +// suitable to be generated randomly without risk of collisions. It should be +// preferred when nonce uniqueness cannot be trivially ensured, or whenever +// nonces are randomly generated. +func NewX(key []byte) (cipher.AEAD, error) { + if len(key) != KeySize { + return nil, errors.New("chacha20poly1305: bad key length") + } + ret := new(xchacha20poly1305) + copy(ret.key[:], key) + return ret, nil +} + +func (*xchacha20poly1305) NonceSize() int { + return NonceSizeX +} + +func (*xchacha20poly1305) Overhead() int { + return Overhead +} + +func (x *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Seal") + } + + // XChaCha20-Poly1305 technically supports a 64-bit counter, so there is no + // size limit. However, since we reuse the ChaCha20-Poly1305 implementation, + // the second half of the counter is not available. This is unlikely to be + // an issue because the cipher.AEAD API requires the entire message to be in + // memory, and the counter overflows at 256 GB. + if uint64(len(plaintext)) > (1<<38)-64 { + panic("chacha20poly1305: plaintext too large") + } + + c := new(chacha20poly1305) + hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) + copy(c.key[:], hKey) + + // The first 4 bytes of the final nonce are unused counter space. 
+ cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.seal(dst, cNonce[:], plaintext, additionalData) +} + +func (x *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { + if len(nonce) != NonceSizeX { + panic("chacha20poly1305: bad nonce length passed to Open") + } + if len(ciphertext) < 16 { + return nil, errOpen + } + if uint64(len(ciphertext)) > (1<<38)-48 { + panic("chacha20poly1305: ciphertext too large") + } + + c := new(chacha20poly1305) + hKey, _ := chacha20.HChaCha20(x.key[:], nonce[0:16]) + copy(c.key[:], hKey) + + // The first 4 bytes of the final nonce are unused counter space. + cNonce := make([]byte, NonceSize) + copy(cNonce[4:12], nonce[16:24]) + + return c.open(dst, cNonce[:], ciphertext, additionalData) +} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 00000000..6fc2838a --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,824 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. +func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. 
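A minimal usage sketch for the NewX AEAD added above (not part of the vendored code); it follows the doc comment's guidance that the 24-byte XChaCha20-Poly1305 nonce may be generated randomly:

```go
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/chacha20poly1305"
)

func main() {
	key := make([]byte, chacha20poly1305.KeySize)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	aead, err := chacha20poly1305.NewX(key)
	if err != nil {
		panic(err)
	}

	// With the 24-byte XChaCha20 nonce, random generation is safe from
	// collisions for any practical number of messages.
	nonce := make([]byte, chacha20poly1305.NonceSizeX)
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	ciphertext := aead.Seal(nil, nonce, []byte("hello"), nil)
	plaintext, err := aead.Open(nil, nonce, ciphertext, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plaintext)
}
```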
+func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime. +func (b *Builder) AddASN1UTCTime(t time.Time) { + b.AddASN1(asn1.UTCTime, func(c *Builder) { + // As utilized by the X.509 profile, UTCTime can only + // represent the years 1950 through 2049. + if t.Year() < 1950 || t.Year() >= 2050 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t) + return + } + c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. +func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. 
+ if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean +// representation into out and advances. It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer, to a big.Int, or to a []byte it panics. Only +// positive and zero values can be decoded into []byte, and they are returned as +// big-endian binary values that share memory with s. Positive values will have +// no leading zeroes, and zero will be returned as a single zero byte. +// ReadASN1Integer reports whether the read was successful. +func (s *String) ReadASN1Integer(out interface{}) bool { + switch out := out.(type) { + case *int, *int8, *int16, *int32, *int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case *uint, *uint8, *uint16, *uint32, *uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case *big.Int: + return s.readASN1BigInt(out) + case *[]byte: + return s.readASN1Bytes(out) + default: + panic("out does not point to an integer type") + } +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Bytes(out *[]byte) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + return false + } + for len(bytes) > 1 && bytes[0] == 0 { + bytes = bytes[1:] + } + *out = bytes + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. 
+ *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. +func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 5 { + return false + } + // Avoid overflowing int on a 32-bit platform. + // We don't want different behavior based on the architecture. + if ret >= 1<<(31-7) { + return false + } + ret <<= 7 + b := s.read(1)[0] + + // ITU-T X.690, section 8.19.2: + // The subidentifier shall be encoded in the fewest possible octets, + // that is, the leading octet of the subidentifier shall not have the value 0x80. + if i == 0 && b == 0x80 { + return false + } + + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. 
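A short sketch (not part of the vendored file) of how the integer and OID readers above compose; the DER bytes are a hand-encoded SEQUENCE used purely as illustration:

```go
package main

import (
	"encoding/asn1"
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	cbasn1 "golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	// SEQUENCE { INTEGER 42, OBJECT IDENTIFIER 1.2.840.113549 }
	der := []byte{
		0x30, 0x0b,
		0x02, 0x01, 0x2a,
		0x06, 0x06, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d,
	}

	input := cryptobyte.String(der)
	var seq cryptobyte.String
	if !input.ReadASN1(&seq, cbasn1.SEQUENCE) {
		panic("not a SEQUENCE")
	}

	var n int64
	var oid asn1.ObjectIdentifier
	if !seq.ReadASN1Integer(&n) || !seq.ReadASN1ObjectIdentifier(&oid) || !seq.Empty() {
		panic("malformed contents")
	}
	fmt.Println(n, oid) // 42 1.2.840.113549
}
```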
+func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool {
+	var bytes String
+	if !s.ReadASN1(&bytes, asn1.GeneralizedTime) {
+		return false
+	}
+	t := string(bytes)
+	res, err := time.Parse(generalizedTimeFormatStr, t)
+	if err != nil {
+		return false
+	}
+	if serialized := res.Format(generalizedTimeFormatStr); serialized != t {
+		return false
+	}
+	*out = res
+	return true
+}
+
+const defaultUTCTimeFormatStr = "060102150405Z0700"
+
+// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances.
+// It reports whether the read was successful.
+func (s *String) ReadASN1UTCTime(out *time.Time) bool {
+	var bytes String
+	if !s.ReadASN1(&bytes, asn1.UTCTime) {
+		return false
+	}
+	t := string(bytes)
+
+	formatStr := defaultUTCTimeFormatStr
+	var err error
+	res, err := time.Parse(formatStr, t)
+	if err != nil {
+		// Fallback to minute precision if we can't parse second
+		// precision. If we are following X.509 or X.690 we shouldn't
+		// support this, but we do.
+		formatStr = "0601021504Z0700"
+		res, err = time.Parse(formatStr, t)
+	}
+	if err != nil {
+		return false
+	}
+
+	if serialized := res.Format(formatStr); serialized != t {
+		return false
+	}
+
+	if res.Year() >= 2050 {
+		// UTCTime interprets the low order digits 50-99 as 1950-99.
+		// This only applies to its use in the X.509 profile.
+		// See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
+		res = res.AddDate(-100, 0, 0)
+	}
+	*out = res
+	return true
+}
+
+// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
+// It reports whether the read was successful.
+func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
+	var bytes String
+	if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
+		len(bytes)*8/8 != len(bytes) {
+		return false
+	}
+
+	paddingBits := bytes[0]
+	bytes = bytes[1:]
+	if paddingBits > 7 ||
+		len(bytes) == 0 && paddingBits != 0 ||
+		len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
+		return false
+	}
+	out.BitLength = len(bytes)*8 - int(paddingBits)
+	out.Bytes = bytes
+	return true
+}
+
+// ReadASN1BitStringAsBytes decodes an ASN.1 BIT STRING into out and advances.
+// It is an error if the BIT STRING is not a whole number of bytes. It reports
+// whether the read was successful.
+func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool {
+	var bytes String
+	if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 {
+		return false
+	}
+
+	paddingBits := bytes[0]
+	if paddingBits != 0 {
+		return false
+	}
+	*out = bytes[1:]
+	return true
+}
+
+// ReadASN1Bytes reads the contents of a DER-encoded ASN.1 element (not
+// including tag and length bytes) into out, and advances. The element must
+// match the given tag. It reports whether the read was successful.
+func (s *String) ReadASN1Bytes(out *[]byte, tag asn1.Tag) bool {
+	return s.ReadASN1((*String)(out), tag)
+}
+
+// ReadASN1 reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, and advances. The element must match the
+// given tag. It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadASN1(out *String, tag asn1.Tag) bool {
+	var t asn1.Tag
+	if !s.ReadAnyASN1(out, &t) || t != tag {
+		return false
+	}
+	return true
+}
+
+// ReadASN1Element reads the contents of a DER-encoded ASN.1 element (including
+// tag and length bytes) into out, and advances. The element must match the
+// given tag. It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadASN1Element(out *String, tag asn1.Tag) bool {
+	var t asn1.Tag
+	if !s.ReadAnyASN1Element(out, &t) || t != tag {
+		return false
+	}
+	return true
+}
+
+// ReadAnyASN1 reads the contents of a DER-encoded ASN.1 element (not including
+// tag and length bytes) into out, sets outTag to its tag, and advances.
+// It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadAnyASN1(out *String, outTag *asn1.Tag) bool {
+	return s.readASN1(out, outTag, true /* skip header */)
+}
+
+// ReadAnyASN1Element reads the contents of a DER-encoded ASN.1 element
+// (including tag and length bytes) into out, sets outTag to its tag, and
+// advances. It reports whether the read was successful.
+//
+// Tags greater than 30 are not supported (i.e. low-tag-number format only).
+func (s *String) ReadAnyASN1Element(out *String, outTag *asn1.Tag) bool {
+	return s.readASN1(out, outTag, false /* include header */)
+}
+
+// PeekASN1Tag reports whether the next ASN.1 value on the string starts with
+// the given tag.
+func (s String) PeekASN1Tag(tag asn1.Tag) bool {
+	if len(s) == 0 {
+		return false
+	}
+	return asn1.Tag(s[0]) == tag
+}
+
+// SkipASN1 reads and discards an ASN.1 element with the given tag. It reports
+// whether the operation was successful.
+func (s *String) SkipASN1(tag asn1.Tag) bool {
+	var unused String
+	return s.ReadASN1(&unused, tag)
+}
+
+// ReadOptionalASN1 attempts to read the contents of a DER-encoded ASN.1
+// element (not including tag and length bytes) tagged with the given tag into
+// out. It stores whether an element with the tag was found in outPresent,
+// unless outPresent is nil. It reports whether the read was successful.
+func (s *String) ReadOptionalASN1(out *String, outPresent *bool, tag asn1.Tag) bool {
+	present := s.PeekASN1Tag(tag)
+	if outPresent != nil {
+		*outPresent = present
+	}
+	if present && !s.ReadASN1(out, tag) {
+		return false
+	}
+	return true
+}
+
+// SkipOptionalASN1 advances s over an ASN.1 element with the given tag, or
+// else leaves s unchanged. It reports whether the operation was successful.
+func (s *String) SkipOptionalASN1(tag asn1.Tag) bool {
+	if !s.PeekASN1Tag(tag) {
+		return true
+	}
+	var unused String
+	return s.ReadASN1(&unused, tag)
+}
+
+func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
+	if len(*s) < 2 {
+		return false
+	}
+	tag, lenByte := (*s)[0], (*s)[1]
+
+	if tag&0x1f == 0x1f {
+		// ITU-T X.690 section 8.1.2
+		//
+		// An identifier octet with a tag part of 0x1f indicates a high-tag-number
+		// form identifier with two or more octets. We only support tags less than
+		// 31 (i.e. low-tag-number form, single octet identifier).
+		return false
+	}
+
+	if outTag != nil {
+		*outTag = asn1.Tag(tag)
+	}
+
+	// ITU-T X.690 section 8.1.3
+	//
+	// Bit 8 of the first length byte indicates whether the length is short- or
+	// long-form.
+	var length, headerLen uint32 // length includes headerLen
+	if lenByte&0x80 == 0 {
+		// Short-form length (section 8.1.3.4), encoded in bits 1-7.
+		length = uint32(lenByte) + 2
+		headerLen = 2
+	} else {
+		// Long-form length (section 8.1.3.5). Bits 1-7 encode the number of octets
+		// used to encode the length.
+		lenLen := lenByte & 0x7f
+		var len32 uint32
+
+		if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
+			return false
+		}
+
+		lenBytes := String((*s)[2 : 2+lenLen])
+		if !lenBytes.readUnsigned(&len32, int(lenLen)) {
+			return false
+		}
+
+		// ITU-T X.690 section 10.1 (DER length forms) requires encoding the
+		// length with the minimum number of octets.
+		if len32 < 128 {
+			// Length should have used short-form encoding.
+			return false
+		}
+		if len32>>((lenLen-1)*8) == 0 {
+			// Leading octet is 0. Length should have been at least one byte shorter.
+			return false
+		}
+
+		headerLen = 2 + uint32(lenLen)
+		if headerLen+len32 < len32 {
+			// Overflow.
+			return false
+		}
+		length = headerLen + len32
+	}
+
+	if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
+		return false
+	}
+	if skipHeader && !out.Skip(int(headerLen)) {
+		panic("cryptobyte: internal error")
+	}
+
+	return true
+}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
new file mode 100644
index 00000000..cda8e3ed
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go
@@ -0,0 +1,46 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package asn1 contains supporting types for parsing and building ASN.1
+// messages with the cryptobyte package.
+package asn1 // import "golang.org/x/crypto/cryptobyte/asn1"
+
+// Tag represents an ASN.1 identifier octet, consisting of a tag number
+// (indicating a type) and class (such as context-specific or constructed).
+// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 00000000..cf254f5f --- /dev/null +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,350 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. +type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. 
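A small sketch (not part of the vendored file) of the Builder API in use, producing DER with the asn1 tag constants above; the zero-value Builder allocates as needed:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	var b cryptobyte.Builder // zero value allocates as needed
	b.AddASN1(asn1.SEQUENCE, func(child *cryptobyte.Builder) {
		child.AddASN1Int64(42)
		child.AddASN1OctetString([]byte("hi"))
	})

	der, err := b.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", der) // 30 07 02 01 2a 04 02 68 69
}
```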
+func (b *Builder) BytesOrPanic() []byte {
+	if b.err != nil {
+		panic(b.err)
+	}
+	return b.result[b.offset:]
+}
+
+// AddUint8 appends an 8-bit value to the byte string.
+func (b *Builder) AddUint8(v uint8) {
+	b.add(byte(v))
+}
+
+// AddUint16 appends a big-endian, 16-bit value to the byte string.
+func (b *Builder) AddUint16(v uint16) {
+	b.add(byte(v>>8), byte(v))
+}
+
+// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest
+// byte of the 32-bit input value is silently truncated.
+func (b *Builder) AddUint24(v uint32) {
+	b.add(byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddUint32 appends a big-endian, 32-bit value to the byte string.
+func (b *Builder) AddUint32(v uint32) {
+	b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddUint48 appends a big-endian, 48-bit value to the byte string.
+func (b *Builder) AddUint48(v uint64) {
+	b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddUint64 appends a big-endian, 64-bit value to the byte string.
+func (b *Builder) AddUint64(v uint64) {
+	b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+// AddBytes appends a sequence of bytes to the byte string.
+func (b *Builder) AddBytes(v []byte) {
+	b.add(v...)
+}
+
+// BuilderContinuation is a continuation-passing interface for building
+// length-prefixed byte sequences. Builder methods for length-prefixed
+// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
+// supplied to them. The child builder passed to the continuation can be used
+// to build the content of the length-prefixed sequence. For example:
+//
+//	parent := cryptobyte.NewBuilder()
+//	parent.AddUint8LengthPrefixed(func (child *Builder) {
+//		child.AddUint8(42)
+//		child.AddUint8LengthPrefixed(func (grandchild *Builder) {
+//			grandchild.AddUint8(5)
+//		})
+//	})
+//
+// It is an error to write more bytes to the child than allowed by the reserved
+// length prefix. After the continuation returns, the child must be considered
+// invalid, i.e. users must not store any copies or references of the child
+// that outlive the continuation.
+//
+// If the continuation panics with a value of type BuildError then the inner
+// error will be returned as the error from Bytes. If the child panics
+// otherwise then Bytes will repanic with the same value.
+type BuilderContinuation func(child *Builder)
+
+// BuildError wraps an error. If a BuilderContinuation panics with this value,
+// the panic will be recovered and the inner error will be returned from
+// Builder.Bytes.
+type BuildError struct {
+	Err error
+}
+
+// AddUint8LengthPrefixed adds an 8-bit length-prefixed byte sequence.
+func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) {
+	b.addLengthPrefixed(1, false, f)
+}
+
+// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence.
+func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) {
+	b.addLengthPrefixed(2, false, f)
+}
+
+// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence.
+func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) {
+	b.addLengthPrefixed(3, false, f)
+}
+
+// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence.
+func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) + + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) + childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back non-negative n bytes written directly to the Builder. 
+// An attempt by a child builder passed to a continuation to unwrite bytes
+// from its parent will panic.
+func (b *Builder) Unwrite(n int) {
+	if b.err != nil {
+		return
+	}
+	if b.child != nil {
+		panic("cryptobyte: attempted unwrite while child is pending")
+	}
+	length := len(b.result) - b.pendingLenLen - b.offset
+	if length < 0 {
+		panic("cryptobyte: internal error")
+	}
+	if n < 0 {
+		panic("cryptobyte: attempted to unwrite negative number of bytes")
+	}
+	if n > length {
+		panic("cryptobyte: attempted to unwrite more than was written")
+	}
+	b.result = b.result[:len(b.result)-n]
+}
+
+// A MarshalingValue marshals itself into a Builder.
+type MarshalingValue interface {
+	// Marshal is called by Builder.AddValue. It receives a pointer to a builder
+	// to marshal itself into. It may return an error that occurred during
+	// marshaling, such as unset or invalid values.
+	Marshal(b *Builder) error
+}
+
+// AddValue calls Marshal on v, passing a pointer to the builder to append to.
+// If Marshal returns an error, it is set on the Builder so that subsequent
+// appends don't have an effect.
+func (b *Builder) AddValue(v MarshalingValue) {
+	err := v.Marshal(b)
+	if err != nil {
+		b.err = err
+	}
+}
diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go
new file mode 100644
index 00000000..10692a8a
--- /dev/null
+++ b/vendor/golang.org/x/crypto/cryptobyte/string.go
@@ -0,0 +1,183 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cryptobyte contains types that help with parsing and constructing
+// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage
+// contains useful ASN.1 constants.)
+//
+// The String type is for parsing. It wraps a []byte slice and provides helper
+// functions for consuming structures, value by value.
+//
+// The Builder type is for constructing messages. It provides helper functions
+// for appending values and also for appending length-prefixed submessages –
+// without having to worry about calculating the length prefix ahead of time.
+//
+// See the documentation and examples for the Builder and String types to get
+// started.
+package cryptobyte // import "golang.org/x/crypto/cryptobyte"
+
+// String represents a string of bytes. It provides methods for parsing
+// fixed-length and length-prefixed values from it.
+type String []byte
+
+// read advances a String by n bytes and returns them. If less than n bytes
+// remain, it returns nil.
+func (s *String) read(n int) []byte {
+	if len(*s) < n || n < 0 {
+		return nil
+	}
+	v := (*s)[:n]
+	*s = (*s)[n:]
+	return v
+}
+
+// Skip advances the String by n bytes and reports whether it was successful.
+func (s *String) Skip(n int) bool {
+	return s.read(n) != nil
+}
+
+// ReadUint8 decodes an 8-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint8(out *uint8) bool {
+	v := s.read(1)
+	if v == nil {
+		return false
+	}
+	*out = uint8(v[0])
+	return true
+}
+
+// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it.
+// It reports whether the read was successful.
+func (s *String) ReadUint16(out *uint16) bool {
+	v := s.read(2)
+	if v == nil {
+		return false
+	}
+	*out = uint16(v[0])<<8 | uint16(v[1])
+	return true
+}
+
+// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it.
+// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint48(out *uint64) bool { + v := s.read(6) + if v == nil { + return false + } + *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5]) + return true +} + +// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint64(out *uint64) bool { + v := s.read(8) + if v == nil { + return false + } + *out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. +func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. 
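A brief sketch (not part of the vendored file) of String-based parsing with the readers above; the message bytes are illustrative only:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	// A 16-bit length prefix (0x0003) followed by three bytes of payload,
	// then a trailing uint8.
	msg := cryptobyte.String([]byte{0x00, 0x03, 'a', 'b', 'c', 0x07})

	var body cryptobyte.String
	var tail uint8
	if !msg.ReadUint16LengthPrefixed(&body) || !msg.ReadUint8(&tail) || !msg.Empty() {
		panic("malformed message")
	}
	fmt.Printf("%s %d\n", body, tail) // abc 7
}
```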
+func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go deleted file mode 100644 index a7828345..00000000 --- a/vendor/golang.org/x/crypto/ed25519/ed25519.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// https://ed25519.cr.yp.to/. -// -// These functions are also compatible with the “Ed25519” function defined in -// RFC 8032. However, unlike RFC 8032's formulation, this package's private key -// representation includes a public key suffix to make multiple signing -// operations with the same key more efficient. This package refers to the RFC -// 8032 private key as the “seed”. -// -// Beginning with Go 1.13, the functionality of this package was moved to the -// standard library as crypto/ed25519. This package only acts as a compatibility -// wrapper. -package ed25519 - -import ( - "crypto/ed25519" - "io" -) - -const ( - // PublicKeySize is the size, in bytes, of public keys as used in this package. - PublicKeySize = 32 - // PrivateKeySize is the size, in bytes, of private keys as used in this package. - PrivateKeySize = 64 - // SignatureSize is the size, in bytes, of signatures generated and verified by this package. - SignatureSize = 64 - // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. - SeedSize = 32 -) - -// PublicKey is the type of Ed25519 public keys. -// -// This type is an alias for crypto/ed25519's PublicKey type. -// See the crypto/ed25519 package for the methods on this type. -type PublicKey = ed25519.PublicKey - -// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. -// -// This type is an alias for crypto/ed25519's PrivateKey type. -// See the crypto/ed25519 package for the methods on this type. -type PrivateKey = ed25519.PrivateKey - -// GenerateKey generates a public/private key pair using entropy from rand. -// If rand is nil, crypto/rand.Reader will be used. -func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { - return ed25519.GenerateKey(rand) -} - -// NewKeyFromSeed calculates a private key from a seed. It will panic if -// len(seed) is not SeedSize. This function is provided for interoperability -// with RFC 8032. RFC 8032's private keys correspond to seeds in this -// package. -func NewKeyFromSeed(seed []byte) PrivateKey { - return ed25519.NewKeyFromSeed(seed) -} - -// Sign signs the message with privateKey and returns a signature. It will -// panic if len(privateKey) is not PrivateKeySize. -func Sign(privateKey PrivateKey, message []byte) []byte { - return ed25519.Sign(privateKey, message) -} - -// Verify reports whether sig is a valid signature of message by publicKey. It -// will panic if len(publicKey) is not PublicKeySize. -func Verify(publicKey PublicKey, message, sig []byte) bool { - return ed25519.Verify(publicKey, message, sig) -} diff --git a/vendor/golang.org/x/crypto/hkdf/hkdf.go b/vendor/golang.org/x/crypto/hkdf/hkdf.go new file mode 100644 index 00000000..dda3f143 --- /dev/null +++ b/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package hkdf implements the HMAC-based Extract-and-Expand Key Derivation +// Function (HKDF) as defined in RFC 5869. +// +// HKDF is a cryptographic key derivation function (KDF) with the goal of +// expanding limited input keying material into one or more cryptographically +// strong secret keys. +package hkdf // import "golang.org/x/crypto/hkdf" + +import ( + "crypto/hmac" + "errors" + "hash" + "io" +) + +// Extract generates a pseudorandom key for use with Expand from an input secret +// and an optional independent salt. +// +// Only use this function if you need to reuse the extracted key with multiple +// Expand invocations and different context values. Most common scenarios, +// including the generation of multiple keys, should use New instead. +func Extract(hash func() hash.Hash, secret, salt []byte) []byte { + if salt == nil { + salt = make([]byte, hash().Size()) + } + extractor := hmac.New(hash, salt) + extractor.Write(secret) + return extractor.Sum(nil) +} + +type hkdf struct { + expander hash.Hash + size int + + info []byte + counter byte + + prev []byte + buf []byte +} + +func (f *hkdf) Read(p []byte) (int, error) { + // Check whether enough data can be generated + need := len(p) + remains := len(f.buf) + int(255-f.counter+1)*f.size + if remains < need { + return 0, errors.New("hkdf: entropy limit reached") + } + // Read any leftover from the buffer + n := copy(p, f.buf) + p = p[n:] + + // Fill the rest of the buffer + for len(p) > 0 { + f.expander.Reset() + f.expander.Write(f.prev) + f.expander.Write(f.info) + f.expander.Write([]byte{f.counter}) + f.prev = f.expander.Sum(f.prev[:0]) + f.counter++ + + // Copy the new batch into p + f.buf = f.prev + n = copy(p, f.buf) + p = p[n:] + } + // Save leftovers for next run + f.buf = f.buf[n:] + + return need, nil +} + +// Expand returns a Reader, from which keys can be read, using the given +// pseudorandom key and optional context info, skipping the extraction step. +// +// The pseudorandomKey should have been generated by Extract, or be a uniformly +// random or pseudorandom cryptographically strong key. See RFC 5869, Section +// 3.3. Most common scenarios will want to use New instead. +func Expand(hash func() hash.Hash, pseudorandomKey, info []byte) io.Reader { + expander := hmac.New(hash, pseudorandomKey) + return &hkdf{expander, expander.Size(), info, 1, nil, nil} +} + +// New returns a Reader, from which keys can be read, using the given hash, +// secret, salt and context info. Salt and info can be nil. +func New(hash func() hash.Hash, secret, salt, info []byte) io.Reader { + prk := Extract(hash, secret, salt) + return Expand(hash, prk, info) +} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go index fc04d03e..27d0e14a 100644 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -16,8 +16,9 @@ import ( // Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear // in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms. -// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't -// appear in the Signature.Format field. +// Unlike key algorithm names, these are not passed to AlgorithmSigner nor +// returned by MultiAlgorithmSigner and don't appear in the Signature.Format +// field. 
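Stepping back to the hkdf package added above, a usage sketch (not part of the diff) deriving two independent keys from one secret; the salt and context string are placeholders:

```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

func main() {
	secret := []byte("input keying material")

	salt := make([]byte, sha256.Size)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}

	// One Reader yields a stream of output keying material; successive
	// reads produce independent keys.
	r := hkdf.New(sha256.New, secret, salt, []byte("example context"))
	var k1, k2 [32]byte
	if _, err := io.ReadFull(r, k1[:]); err != nil {
		panic(err)
	}
	if _, err := io.ReadFull(r, k2[:]); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n%x\n", k1, k2)
}
```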
const ( CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" @@ -255,10 +256,17 @@ func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { return nil, errors.New("ssh: signer and cert have different public key") } - if algorithmSigner, ok := signer.(AlgorithmSigner); ok { + switch s := signer.(type) { + case MultiAlgorithmSigner: + return &multiAlgorithmSigner{ + AlgorithmSigner: &algorithmOpenSSHCertSigner{ + &openSSHCertSigner{cert, signer}, s}, + supportedAlgorithms: s.Algorithms(), + }, nil + case AlgorithmSigner: return &algorithmOpenSSHCertSigner{ - &openSSHCertSigner{cert, signer}, algorithmSigner}, nil - } else { + &openSSHCertSigner{cert, signer}, s}, nil + default: return &openSSHCertSigner{cert, signer}, nil } } @@ -432,7 +440,9 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { } // SignCert signs the certificate with an authority, setting the Nonce, -// SignatureKey, and Signature fields. +// SignatureKey, and Signature fields. If the authority implements the +// MultiAlgorithmSigner interface the first algorithm in the list is used. This +// is useful if you want to sign with a specific algorithm. func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { c.Nonce = make([]byte, 32) if _, err := io.ReadFull(rand, c.Nonce); err != nil { @@ -440,8 +450,20 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { } c.SignatureKey = authority.PublicKey() - // Default to KeyAlgoRSASHA512 for ssh-rsa signers. - if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { + if v, ok := authority.(MultiAlgorithmSigner); ok { + if len(v.Algorithms()) == 0 { + return errors.New("the provided authority has no signature algorithm") + } + // Use the first algorithm in the list. + sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), v.Algorithms()[0]) + if err != nil { + return err + } + c.Signature = sig + return nil + } else if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA { + // Default to KeyAlgoRSASHA512 for ssh-rsa signers. + // TODO: consider using KeyAlgoRSASHA256 as default. sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512) if err != nil { return err diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go index 409b5ea1..5c3bc257 100644 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -71,7 +71,9 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { for auth := AuthMethod(new(noneAuth)); auth != nil; { ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions) if err != nil { - return err + // We return the error later if there is no other method left to + // try. + ok = authFailure } if ok == authSuccess { // success @@ -101,6 +103,12 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error { } } } + + if auth == nil && err != nil { + // We have an error and there are no other authentication methods to + // try, so we return it. 
+ return err + } } return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) } @@ -217,21 +225,45 @@ func (cb publicKeyCallback) method() string { return "publickey" } -func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) { +func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiAlgorithmSigner, string, error) { + var as MultiAlgorithmSigner keyFormat := signer.PublicKey().Type() - // Like in sendKexInit, if the public key implements AlgorithmSigner we - // assume it supports all algorithms, otherwise only the key format one. - as, ok := signer.(AlgorithmSigner) - if !ok { - return algorithmSignerWrapper{signer}, keyFormat + // If the signer implements MultiAlgorithmSigner we use the algorithms it + // support, if it implements AlgorithmSigner we assume it supports all + // algorithms, otherwise only the key format one. + switch s := signer.(type) { + case MultiAlgorithmSigner: + as = s + case AlgorithmSigner: + as = &multiAlgorithmSigner{ + AlgorithmSigner: s, + supportedAlgorithms: algorithmsForKeyFormat(underlyingAlgo(keyFormat)), + } + default: + as = &multiAlgorithmSigner{ + AlgorithmSigner: algorithmSignerWrapper{signer}, + supportedAlgorithms: []string{underlyingAlgo(keyFormat)}, + } + } + + getFallbackAlgo := func() (string, error) { + // Fallback to use if there is no "server-sig-algs" extension or a + // common algorithm cannot be found. We use the public key format if the + // MultiAlgorithmSigner supports it, otherwise we return an error. + if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) { + return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v", + underlyingAlgo(keyFormat), keyFormat, as.Algorithms()) + } + return keyFormat, nil } extPayload, ok := extensions["server-sig-algs"] if !ok { - // If there is no "server-sig-algs" extension, fall back to the key - // format algorithm. - return as, keyFormat + // If there is no "server-sig-algs" extension use the fallback + // algorithm. + algo, err := getFallbackAlgo() + return as, algo, err } // The server-sig-algs extension only carries underlying signature @@ -245,15 +277,22 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as Alg } } - keyAlgos := algorithmsForKeyFormat(keyFormat) + // Filter algorithms based on those supported by MultiAlgorithmSigner. + var keyAlgos []string + for _, algo := range algorithmsForKeyFormat(keyFormat) { + if contains(as.Algorithms(), underlyingAlgo(algo)) { + keyAlgos = append(keyAlgos, algo) + } + } + algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos) if err != nil { - // If there is no overlap, try the key anyway with the key format - // algorithm, to support servers that fail to list all supported - // algorithms. - return as, keyFormat + // If there is no overlap, return the fallback algorithm to support + // servers that fail to list all supported algorithms. 
+ algo, err := getFallbackAlgo() + return as, algo, err } - return as, algo + return as, algo, nil } func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) { @@ -267,10 +306,17 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand return authFailure, nil, err } var methods []string + var errSigAlgo error for _, signer := range signers { pub := signer.PublicKey() - as, algo := pickSignatureAlgorithm(signer, extensions) - + as, algo, err := pickSignatureAlgorithm(signer, extensions) + if err != nil && errSigAlgo == nil { + // If we cannot negotiate a signature algorithm store the first + // error so we can return it to provide a more meaningful message if + // no other signers work. + errSigAlgo = err + continue + } ok, err := validateKey(pub, algo, user, c) if err != nil { return authFailure, nil, err @@ -317,22 +363,12 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand // contain the "publickey" method, do not attempt to authenticate with any // other keys. According to RFC 4252 Section 7, the latter can occur when // additional authentication methods are required. - if success == authSuccess || !containsMethod(methods, cb.method()) { + if success == authSuccess || !contains(methods, cb.method()) { return success, methods, err } } - return authFailure, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false + return authFailure, methods, errSigAlgo } // validateKey validates the key provided is acceptable to the server. diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go index f6bff60d..edbe6334 100644 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -13,6 +13,7 @@ others. References: + [PROTOCOL]: https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=HEAD [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 07a1843e..70a7369f 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -461,19 +461,24 @@ func (t *handshakeTransport) sendKexInit() error { isServer := len(t.hostKeys) > 0 if isServer { for _, k := range t.hostKeys { - // If k is an AlgorithmSigner, presume it supports all signature algorithms - // associated with the key format. (Ideally AlgorithmSigner would have a - // method to advertise supported algorithms, but it doesn't. This means that - // adding support for a new algorithm is a breaking change, as we will - // immediately negotiate it even if existing implementations don't support - // it. If that ever happens, we'll have to figure something out.) - // If k is not an AlgorithmSigner, we can only assume it only supports the - // algorithms that matches the key format. (This means that Sign can't pick - // a different default.) + // If k is a MultiAlgorithmSigner, we restrict the signature + // algorithms. If k is a AlgorithmSigner, presume it supports all + // signature algorithms associated with the key format. 
If k is not + // an AlgorithmSigner, we can only assume it only supports the + // algorithms that matches the key format. (This means that Sign + // can't pick a different default). keyFormat := k.PublicKey().Type() - if _, ok := k.(AlgorithmSigner); ok { + + switch s := k.(type) { + case MultiAlgorithmSigner: + for _, algo := range algorithmsForKeyFormat(keyFormat) { + if contains(s.Algorithms(), underlyingAlgo(algo)) { + msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo) + } + } + case AlgorithmSigner: msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...) - } else { + default: msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat) } } @@ -642,16 +647,20 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { // On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO // message with the server-sig-algs extension if the client supports it. See - // RFC 8308, Sections 2.4 and 3.1. + // RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9. if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") { extInfo := &extInfoMsg{ - NumExtensions: 1, - Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)), + NumExtensions: 2, + Payload: make([]byte, 0, 4+15+4+len(supportedPubKeyAuthAlgosList)+4+16+4+1), } extInfo.Payload = appendInt(extInfo.Payload, len("server-sig-algs")) extInfo.Payload = append(extInfo.Payload, "server-sig-algs"...) extInfo.Payload = appendInt(extInfo.Payload, len(supportedPubKeyAuthAlgosList)) extInfo.Payload = append(extInfo.Payload, supportedPubKeyAuthAlgosList...) + extInfo.Payload = appendInt(extInfo.Payload, len("ping@openssh.com")) + extInfo.Payload = append(extInfo.Payload, "ping@openssh.com"...) + extInfo.Payload = appendInt(extInfo.Payload, 1) + extInfo.Payload = append(extInfo.Payload, "0"...) if err := t.conn.writePacket(Marshal(extInfo)); err != nil { return err } @@ -685,9 +694,16 @@ func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, a func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner { for _, k := range hostKeys { + if s, ok := k.(MultiAlgorithmSigner); ok { + if !contains(s.Algorithms(), underlyingAlgo(algo)) { + continue + } + } + if algo == k.PublicKey().Type() { return algorithmSignerWrapper{k} } + k, ok := k.(AlgorithmSigner) if !ok { continue diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go index dac8ee72..ef1bad73 100644 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -11,13 +11,16 @@ import ( "crypto/cipher" "crypto/dsa" "crypto/ecdsa" + "crypto/ed25519" "crypto/elliptic" "crypto/md5" + "crypto/rand" "crypto/rsa" "crypto/sha256" "crypto/x509" "encoding/asn1" "encoding/base64" + "encoding/binary" "encoding/hex" "encoding/pem" "errors" @@ -26,7 +29,6 @@ import ( "math/big" "strings" - "golang.org/x/crypto/ed25519" "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" ) @@ -295,6 +297,18 @@ func MarshalAuthorizedKey(key PublicKey) []byte { return b.Bytes() } +// MarshalPrivateKey returns a PEM block with the private key serialized in the +// OpenSSH format. +func MarshalPrivateKey(key crypto.PrivateKey, comment string) (*pem.Block, error) { + return marshalOpenSSHPrivateKey(key, comment, unencryptedOpenSSHMarshaler) +} + +// MarshalPrivateKeyWithPassphrase returns a PEM block holding the encrypted +// private key serialized in the OpenSSH format. 
+func MarshalPrivateKeyWithPassphrase(key crypto.PrivateKey, comment string, passphrase []byte) (*pem.Block, error) { + return marshalOpenSSHPrivateKey(key, comment, passphraseProtectedOpenSSHMarshaler(passphrase)) +} + // PublicKey represents a public key using an unspecified algorithm. // // Some PublicKeys provided by this package also implement CryptoPublicKey. @@ -321,7 +335,7 @@ type CryptoPublicKey interface { // A Signer can create signatures that verify against a public key. // -// Some Signers provided by this package also implement AlgorithmSigner. +// Some Signers provided by this package also implement MultiAlgorithmSigner. type Signer interface { // PublicKey returns the associated PublicKey. PublicKey() PublicKey @@ -336,9 +350,9 @@ type Signer interface { // An AlgorithmSigner is a Signer that also supports specifying an algorithm to // use for signing. // -// An AlgorithmSigner can't advertise the algorithms it supports, so it should -// be prepared to be invoked with every algorithm supported by the public key -// format. +// An AlgorithmSigner can't advertise the algorithms it supports, unless it also +// implements MultiAlgorithmSigner, so it should be prepared to be invoked with +// every algorithm supported by the public key format. type AlgorithmSigner interface { Signer @@ -349,6 +363,75 @@ type AlgorithmSigner interface { SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) } +// MultiAlgorithmSigner is an AlgorithmSigner that also reports the algorithms +// supported by that signer. +type MultiAlgorithmSigner interface { + AlgorithmSigner + + // Algorithms returns the available algorithms in preference order. The list + // must not be empty, and it must not include certificate types. + Algorithms() []string +} + +// NewSignerWithAlgorithms returns a signer restricted to the specified +// algorithms. The algorithms must be set in preference order. The list must not +// be empty, and it must not include certificate types. An error is returned if +// the specified algorithms are incompatible with the public key type. 
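A sketch of the NewSignerWithAlgorithms helper defined below, restricting an RSA signer to the SHA-2 algorithms; the key size and algorithm choice are illustrative:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	signer, err := ssh.NewSignerFromKey(rsaKey)
	if err != nil {
		panic(err)
	}
	as, ok := signer.(ssh.AlgorithmSigner)
	if !ok {
		panic("signer does not support algorithm selection")
	}

	// Exclude ssh-rsa (SHA-1); only these algorithms will be advertised
	// during negotiation and used by SignCert.
	restricted, err := ssh.NewSignerWithAlgorithms(as,
		[]string{ssh.KeyAlgoRSASHA256, ssh.KeyAlgoRSASHA512})
	if err != nil {
		panic(err)
	}
	fmt.Println(restricted.Algorithms()) // [rsa-sha2-256 rsa-sha2-512]
}
```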
+func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (MultiAlgorithmSigner, error) { + if len(algorithms) == 0 { + return nil, errors.New("ssh: please specify at least one valid signing algorithm") + } + var signerAlgos []string + supportedAlgos := algorithmsForKeyFormat(underlyingAlgo(signer.PublicKey().Type())) + if s, ok := signer.(*multiAlgorithmSigner); ok { + signerAlgos = s.Algorithms() + } else { + signerAlgos = supportedAlgos + } + + for _, algo := range algorithms { + if !contains(supportedAlgos, algo) { + return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q", + algo, signer.PublicKey().Type()) + } + if !contains(signerAlgos, algo) { + return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo) + } + } + return &multiAlgorithmSigner{ + AlgorithmSigner: signer, + supportedAlgorithms: algorithms, + }, nil +} + +type multiAlgorithmSigner struct { + AlgorithmSigner + supportedAlgorithms []string +} + +func (s *multiAlgorithmSigner) Algorithms() []string { + return s.supportedAlgorithms +} + +func (s *multiAlgorithmSigner) isAlgorithmSupported(algorithm string) bool { + if algorithm == "" { + algorithm = underlyingAlgo(s.PublicKey().Type()) + } + for _, algo := range s.supportedAlgorithms { + if algorithm == algo { + return true + } + } + return false +} + +func (s *multiAlgorithmSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { + if !s.isAlgorithmSupported(algorithm) { + return nil, fmt.Errorf("ssh: algorithm %q is not supported: %v", algorithm, s.supportedAlgorithms) + } + return s.AlgorithmSigner.SignWithAlgorithm(rand, data, algorithm) +} + type rsaPublicKey rsa.PublicKey func (r *rsaPublicKey) Type() string { @@ -512,6 +595,10 @@ func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { return k.SignWithAlgorithm(rand, data, k.PublicKey().Type()) } +func (k *dsaPrivateKey) Algorithms() []string { + return []string{k.PublicKey().Type()} +} + func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { if algorithm != "" && algorithm != k.PublicKey().Type() { return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) @@ -961,13 +1048,16 @@ func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { return s.SignWithAlgorithm(rand, data, s.pubKey.Type()) } +func (s *wrappedSigner) Algorithms() []string { + return algorithmsForKeyFormat(s.pubKey.Type()) +} + func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { if algorithm == "" { algorithm = s.pubKey.Type() } - supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type()) - if !contains(supportedAlgos, algorithm) { + if !contains(s.Algorithms(), algorithm) { return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type()) } @@ -1241,28 +1331,106 @@ func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { } } +func unencryptedOpenSSHMarshaler(privKeyBlock []byte) ([]byte, string, string, string, error) { + key := generateOpenSSHPadding(privKeyBlock, 8) + return key, "none", "none", "", nil +} + +func passphraseProtectedOpenSSHMarshaler(passphrase []byte) openSSHEncryptFunc { + return func(privKeyBlock []byte) ([]byte, string, string, string, error) { + salt := make([]byte, 16) + if _, err := rand.Read(salt); err != nil { + return nil, "", "", "", err + } + + opts := struct { + Salt []byte + 
Rounds uint32 + }{salt, 16} + + // Derive key to encrypt the private key block. + k, err := bcrypt_pbkdf.Key(passphrase, salt, int(opts.Rounds), 32+aes.BlockSize) + if err != nil { + return nil, "", "", "", err + } + + // Add padding matching the block size of AES. + keyBlock := generateOpenSSHPadding(privKeyBlock, aes.BlockSize) + + // Encrypt the private key using the derived secret. + + dst := make([]byte, len(keyBlock)) + key, iv := k[:32], k[32:] + block, err := aes.NewCipher(key) + if err != nil { + return nil, "", "", "", err + } + + stream := cipher.NewCTR(block, iv) + stream.XORKeyStream(dst, keyBlock) + + return dst, "aes256-ctr", "bcrypt", string(Marshal(opts)), nil + } +} + +const privateKeyAuthMagic = "openssh-key-v1\x00" + type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) +type openSSHEncryptFunc func(PrivKeyBlock []byte) (ProtectedKeyBlock []byte, cipherName, kdfName, kdfOptions string, err error) + +type openSSHEncryptedPrivateKey struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte +} + +type openSSHPrivateKey struct { + Check1 uint32 + Check2 uint32 + Keytype string + Rest []byte `ssh:"rest"` +} + +type openSSHRSAPrivateKey struct { + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int + P *big.Int + Q *big.Int + Comment string + Pad []byte `ssh:"rest"` +} + +type openSSHEd25519PrivateKey struct { + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` +} + +type openSSHECDSAPrivateKey struct { + Curve string + Pub []byte + D *big.Int + Comment string + Pad []byte `ssh:"rest"` +} // parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt // function to unwrap the encrypted portion. unencryptedOpenSSHKey can be used // as the decrypt function to parse an unencrypted private key. See // https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. 
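// The layout after the PEM armor is, sketched from PROTOCOL.key:
//
//	"openssh-key-v1\x00"   magic (privateKeyAuthMagic)
//	string  ciphername     e.g. "none" or "aes256-ctr"
//	string  kdfname        e.g. "none" or "bcrypt"
//	string  kdfoptions     bcrypt salt and rounds, empty when unencrypted
//	uint32  number of keys always 1 in this implementation
//	string  publickey
//	string  private key block, encrypted when a passphrase is set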
func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { - const magic = "openssh-key-v1\x00" - if len(key) < len(magic) || string(key[:len(magic)]) != magic { + if len(key) < len(privateKeyAuthMagic) || string(key[:len(privateKeyAuthMagic)]) != privateKeyAuthMagic { return nil, errors.New("ssh: invalid openssh private key format") } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } + remaining := key[len(privateKeyAuthMagic):] + var w openSSHEncryptedPrivateKey if err := Unmarshal(remaining, &w); err != nil { return nil, err } @@ -1284,13 +1452,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv return nil, err } - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - + var pk1 openSSHPrivateKey if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { if w.CipherName != "none" { return nil, x509.IncorrectPasswordError @@ -1300,18 +1462,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv switch pk1.Keytype { case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - + var key openSSHRSAPrivateKey if err := Unmarshal(pk1.Rest, &key); err != nil { return nil, err } @@ -1337,13 +1488,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv return pk, nil case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - + var key openSSHEd25519PrivateKey if err := Unmarshal(pk1.Rest, &key); err != nil { return nil, err } @@ -1360,14 +1505,7 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv copy(pk, key.Priv) return &pk, nil case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - key := struct { - Curve string - Pub []byte - D *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - + var key openSSHECDSAPrivateKey if err := Unmarshal(pk1.Rest, &key); err != nil { return nil, err } @@ -1415,6 +1553,131 @@ func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.Priv } } +func marshalOpenSSHPrivateKey(key crypto.PrivateKey, comment string, encrypt openSSHEncryptFunc) (*pem.Block, error) { + var w openSSHEncryptedPrivateKey + var pk1 openSSHPrivateKey + + // Random check bytes. + var check uint32 + if err := binary.Read(rand.Reader, binary.BigEndian, &check); err != nil { + return nil, err + } + + pk1.Check1 = check + pk1.Check2 = check + w.NumKeys = 1 + + // Use a []byte directly on ed25519 keys. + if k, ok := key.(*ed25519.PrivateKey); ok { + key = *k + } + + switch k := key.(type) { + case *rsa.PrivateKey: + E := new(big.Int).SetInt64(int64(k.PublicKey.E)) + // Marshal public key: + // E and N are in reversed order in the public and private key. + pubKey := struct { + KeyType string + E *big.Int + N *big.Int + }{ + KeyAlgoRSA, + E, k.PublicKey.N, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. 
+ key := openSSHRSAPrivateKey{ + N: k.PublicKey.N, + E: E, + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comment: comment, + } + pk1.Keytype = KeyAlgoRSA + pk1.Rest = Marshal(key) + case ed25519.PrivateKey: + pub := make([]byte, ed25519.PublicKeySize) + priv := make([]byte, ed25519.PrivateKeySize) + copy(pub, k[32:]) + copy(priv, k) + + // Marshal public key. + pubKey := struct { + KeyType string + Pub []byte + }{ + KeyAlgoED25519, pub, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. + key := openSSHEd25519PrivateKey{ + Pub: pub, + Priv: priv, + Comment: comment, + } + pk1.Keytype = KeyAlgoED25519 + pk1.Rest = Marshal(key) + case *ecdsa.PrivateKey: + var curve, keyType string + switch name := k.Curve.Params().Name; name { + case "P-256": + curve = "nistp256" + keyType = KeyAlgoECDSA256 + case "P-384": + curve = "nistp384" + keyType = KeyAlgoECDSA384 + case "P-521": + curve = "nistp521" + keyType = KeyAlgoECDSA521 + default: + return nil, errors.New("ssh: unhandled elliptic curve " + name) + } + + pub := elliptic.Marshal(k.Curve, k.PublicKey.X, k.PublicKey.Y) + + // Marshal public key. + pubKey := struct { + KeyType string + Curve string + Pub []byte + }{ + keyType, curve, pub, + } + w.PubKey = Marshal(pubKey) + + // Marshal private key. + key := openSSHECDSAPrivateKey{ + Curve: curve, + Pub: pub, + D: k.D, + Comment: comment, + } + pk1.Keytype = keyType + pk1.Rest = Marshal(key) + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", k) + } + + var err error + // Add padding and encrypt the key if necessary. + w.PrivKeyBlock, w.CipherName, w.KdfName, w.KdfOpts, err = encrypt(Marshal(pk1)) + if err != nil { + return nil, err + } + + b := Marshal(w) + block := &pem.Block{ + Type: "OPENSSH PRIVATE KEY", + Bytes: append([]byte(privateKeyAuthMagic), b...), + } + return block, nil +} + func checkOpenSSHKeyPadding(pad []byte) error { for i, b := range pad { if int(b) != i+1 { @@ -1424,6 +1687,13 @@ func checkOpenSSHKeyPadding(pad []byte) error { return nil } +func generateOpenSSHPadding(block []byte, blockSize int) []byte { + for i, l := 0, len(block); (l+i)%blockSize != 0; i++ { + block = append(block, byte(i+1)) + } + return block +} + // FingerprintLegacyMD5 returns the user presentation of the key's // fingerprint as described by RFC 4716 section 4. func FingerprintLegacyMD5(pubKey PublicKey) string { diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index 922032d9..b55f8605 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -349,6 +349,20 @@ type userAuthGSSAPIError struct { LanguageTag string } +// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9 +const msgPing = 192 + +type pingMsg struct { + Data string `sshtype:"192"` +} + +// Transport layer OpenSSH extension. See [PROTOCOL], section 1.9 +const msgPong = 193 + +type pongMsg struct { + Data string `sshtype:"193"` +} + // typeTags returns the possible type bytes for the given reflect.Type, which // should be a struct. The possible values are separated by a '|' character. 
func typeTags(structType reflect.Type) (tags []byte) { diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go index 9654c018..d2d24c63 100644 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -231,6 +231,12 @@ func (m *mux) onePacket() error { return m.handleChannelOpen(packet) case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: return m.handleGlobalPacket(packet) + case msgPing: + var msg pingMsg + if err := Unmarshal(packet, &msg); err != nil { + return fmt.Errorf("failed to unmarshal ping@openssh.com message: %w", err) + } + return m.sendMessage(pongMsg(msg)) } // assume a channel packet. diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index b21322af..727c71b9 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -576,7 +576,16 @@ userAuthLoop: if !ok || len(payload) > 0 { return nil, parseError(msgUserAuthRequest) } - + // Ensure the declared public key algo is compatible with the + // decoded one. This check will ensure we don't accept e.g. + // ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public + // key type. The algorithm and public key type must be + // consistent: both must be certificate algorithms, or neither. + if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) { + authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q", + pubKey.Type(), algo) + break + } // Ensure the public key algo and signature algo // are supported. Compare the private key // algorithm name that corresponds to algo with diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6d5e0088..02c88b6b 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -581,9 +581,11 @@ type serverConn struct { advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curClientStreams uint32 // number of open streams initiated by the client curPushedStreams uint32 // number of open streams initiated by server push + curHandlers uint32 // number of running handler goroutines maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes streams map[uint32]*stream + unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) @@ -981,6 +983,8 @@ func (sc *serverConn) serve() { return case gracefulShutdownMsg: sc.startGracefulShutdownInternal() + case handlerDoneMsg: + sc.handlerDone() default: panic("unknown timer") } @@ -1020,6 +1024,7 @@ var ( idleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) + handlerDoneMsg = new(serverMessage) ) func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } @@ -1892,9 +1897,11 @@ func (st *stream) copyTrailersToHandlerRequest() { // onReadTimeout is run on its own goroutine (from time.AfterFunc) // when the stream's ReadTimeout has fired. func (st *stream) onReadTimeout() { - // Wrap the ErrDeadlineExceeded to avoid callers depending on us - // returning the bare error. 
- st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded)) + if st.body != nil { + // Wrap the ErrDeadlineExceeded to avoid callers depending on us + // returning the bare error. + st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded)) + } } // onWriteTimeout is run on its own goroutine (from time.AfterFunc) @@ -2012,13 +2019,10 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout != 0 { sc.conn.SetReadDeadline(time.Time{}) - if st.body != nil { - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) - } + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } - go sc.runHandler(rw, req, handler) - return nil + return sc.scheduleHandler(id, rw, req, handler) } func (sc *serverConn) upgradeRequest(req *http.Request) { @@ -2038,6 +2042,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { sc.conn.SetReadDeadline(time.Time{}) } + // This is the first request on the connection, + // so start the handler directly rather than going + // through scheduleHandler. + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) } @@ -2278,8 +2286,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response return &responseWriter{rws: rws} } +type unstartedHandler struct { + streamID uint32 + rw *responseWriter + req *http.Request + handler func(http.ResponseWriter, *http.Request) +} + +// scheduleHandler starts a handler goroutine, +// or schedules one to start as soon as an existing handler finishes. +func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error { + sc.serveG.check() + maxHandlers := sc.advMaxStreams + if sc.curHandlers < maxHandlers { + sc.curHandlers++ + go sc.runHandler(rw, req, handler) + return nil + } + if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) { + return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm)) + } + sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{ + streamID: streamID, + rw: rw, + req: req, + handler: handler, + }) + return nil +} + +func (sc *serverConn) handlerDone() { + sc.serveG.check() + sc.curHandlers-- + i := 0 + maxHandlers := sc.advMaxStreams + for ; i < len(sc.unstartedHandlers); i++ { + u := sc.unstartedHandlers[i] + if sc.streams[u.streamID] == nil { + // This stream was reset before its goroutine had a chance to start. + continue + } + if sc.curHandlers >= maxHandlers { + break + } + sc.curHandlers++ + go sc.runHandler(u.rw, u.req, u.handler) + sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references + } + sc.unstartedHandlers = sc.unstartedHandlers[i:] + if len(sc.unstartedHandlers) == 0 { + sc.unstartedHandlers = nil + } +} + // Run on its own goroutine. 
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { rw.rws.stream.cancelCtx() diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 00000000..e99c92f3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. + VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. 
+func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. +func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. + case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go index 16c6c6b9..e6158794 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. 
//go:build appengine -// +build appengine // This file applies to App Engine first generation runtimes (<= Go 1.9). diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go index a7e27b3d..9c79aa0a 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !appengine -// +build !appengine // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index b3e8783c..12b12a30 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -8,7 +8,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" @@ -20,7 +19,10 @@ import ( "golang.org/x/oauth2/authhandler" ) -const adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" +const ( + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + universeDomainDefault = "googleapis.com" +) // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: @@ -38,6 +40,18 @@ type Credentials struct { // environment and not with a credentials file, e.g. when code is // running on Google Cloud Platform. JSON []byte + + // universeDomain is the default service domain for a given Cloud universe. + universeDomain string +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// The default value is "googleapis.com". +func (c *Credentials) UniverseDomain() string { + if c.universeDomain == "" { + return universeDomainDefault + } + return c.universeDomain } // DefaultCredentials is the old name of Credentials. @@ -142,10 +156,8 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar // Second, try a well-known file. filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, params); err == nil { - return creds, nil - } else if !os.IsNotExist(err) { - return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + if b, err := os.ReadFile(filename); err == nil { + return CredentialsFromJSONWithParams(ctx, b, params) } // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) @@ -203,15 +215,23 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if err := json.Unmarshal(jsonData, &f); err != nil { return nil, err } + + universeDomain := f.UniverseDomain + // Authorized user credentials are only supported in the googleapis.com universe. 
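+ // Overriding here means any universe_domain value present in an
+ // authorized_user file is deliberately ignored.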
+ if f.Type == userCredentialsKey { + universeDomain = universeDomainDefault + } + ts, err := f.tokenSource(ctx, params) if err != nil { return nil, err } ts = newErrWrappingTokenSource(ts) return &Credentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: jsonData, + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + universeDomain: universeDomain, }, nil } @@ -231,7 +251,7 @@ func wellKnownFile() string { } func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { - b, err := ioutil.ReadFile(filename) + b, err := os.ReadFile(filename) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index cc122388..c66c5352 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -16,14 +16,16 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/google/internal/externalaccount" + "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" "golang.org/x/oauth2/jwt" ) // Endpoint is Google's OAuth 2.0 default endpoint. var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://oauth2.googleapis.com/token", - AuthStyle: oauth2.AuthStyleInParams, + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + DeviceAuthURL: "https://oauth2.googleapis.com/device/code", + AuthStyle: oauth2.AuthStyleInParams, } // MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. @@ -95,10 +97,11 @@ func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { // JSON key file types. const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" - externalAccountKey = "external_account" - impersonatedServiceAccount = "impersonated_service_account" + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" + externalAccountKey = "external_account" + externalAccountAuthorizedUserKey = "external_account_authorized_user" + impersonatedServiceAccount = "impersonated_service_account" ) // credentialsFile is the unmarshalled representation of a credentials file. @@ -106,12 +109,13 @@ type credentialsFile struct { Type string `json:"type"` // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - AuthURL string `json:"auth_uri"` - TokenURL string `json:"token_uri"` - ProjectID string `json:"project_id"` + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + UniverseDomain string `json:"universe_domain"` // User Credential fields // (These typically come from gcloud auth.) 
@@ -131,6 +135,9 @@ type credentialsFile struct { QuotaProjectID string `json:"quota_project_id"` WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + // External Account Authorized User fields + RevokeURL string `json:"revoke_url"` + // Service account impersonation SourceCredentials *credentialsFile `json:"source_credentials"` } @@ -199,6 +206,19 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar WorkforcePoolUserProject: f.WorkforcePoolUserProject, } return cfg.TokenSource(ctx) + case externalAccountAuthorizedUserKey: + cfg := &externalaccountauthorizeduser.Config{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + RevokeURL: f.RevokeURL, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + } + return cfg.TokenSource(ctx) case impersonatedServiceAccount: if f.ServiceAccountImpersonationURL == "" || f.SourceCredentials == nil { return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index 2bf3202b..bd4efd19 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -274,49 +274,6 @@ type awsRequest struct { Headers []awsRequestHeader `json:"headers"` } -func (cs awsCredentialSource) validateMetadataServers() error { - if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil { - return err - } - if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil { - return err - } - return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url") -} - -var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"} - -func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool { - if metadataUrl == "" { - // Zero value means use default, which is valid. 
- return true - } - - u, err := url.Parse(metadataUrl) - if err != nil { - // Unparseable URL means invalid - return false - } - - for _, validHostname := range validHostnames { - if u.Hostname() == validHostname { - // If it's one of the valid hostnames, everything is good - return true - } - } - - // hostname not found in our allowlist, so not valid - return false -} - -func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error { - if !cs.isValidMetadataServer(metadataUrl) { - return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName) - } - - return nil -} - func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { if cs.client == nil { cs.client = oauth2.NewClient(cs.ctx, nil) @@ -339,6 +296,10 @@ func shouldUseMetadataServer() bool { return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() } +func (cs awsCredentialSource) credentialSourceType() string { + return "aws" +} + func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { headers := make(map[string]string) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index dcd252a6..33288d36 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -8,13 +8,12 @@ import ( "context" "fmt" "net/http" - "net/url" "regexp" "strconv" - "strings" "time" "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" ) // now aliases time.Now for testing @@ -63,31 +62,10 @@ type Config struct { WorkforcePoolUserProject string } -// Each element consists of a list of patterns. validateURLs checks for matches -// that include all elements in a given list, in that order. 
- var ( validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) ) -func validateURL(input string, patterns []*regexp.Regexp, scheme string) bool { - parsed, err := url.Parse(input) - if err != nil { - return false - } - if !strings.EqualFold(parsed.Scheme, scheme) { - return false - } - toTest := parsed.Host - - for _, pattern := range patterns { - if pattern.MatchString(toTest) { - return true - } - } - return false -} - func validateWorkforceAudience(input string) bool { return validWorkforceAudiencePattern.MatchString(input) } @@ -185,10 +163,6 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL } - if err := awsCredSource.validateMetadataServers(); err != nil { - return nil, err - } - return awsCredSource, nil } } else if c.CredentialSource.File != "" { @@ -202,6 +176,7 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { } type baseCredentialSource interface { + credentialSourceType() string subjectToken() (string, error) } @@ -211,6 +186,15 @@ type tokenSource struct { conf *Config } +func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + credSource.credentialSourceType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} + // Token allows tokenSource to conform to the oauth2.TokenSource interface. func (ts tokenSource) Token() (*oauth2.Token, error) { conf := ts.conf @@ -224,7 +208,7 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { if err != nil { return nil, err } - stsRequest := stsTokenExchangeRequest{ + stsRequest := stsexchange.TokenExchangeRequest{ GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", Audience: conf.Audience, Scope: conf.Scopes, @@ -234,7 +218,8 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { } header := make(http.Header) header.Add("Content-Type", "application/x-www-form-urlencoded") - clientAuth := clientAuthentication{ + header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) + clientAuth := stsexchange.ClientAuthentication{ AuthStyle: oauth2.AuthStyleInHeader, ClientID: conf.ClientID, ClientSecret: conf.ClientSecret, @@ -247,7 +232,7 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { "userProject": conf.WorkforcePoolUserProject, } } - stsResp, err := exchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) + stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go index 579bcce5..6497dc02 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go @@ -233,6 +233,10 @@ func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte return "", tokenTypeError(source) } +func (cs executableCredentialSource) credentialSourceType() string { + return "executable" +} + func (cs executableCredentialSource) subjectToken() (string, error) { if token, err := cs.getTokenFromOutputFile(); 
token != "" || err != nil { return token, err diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go index e953ddb4..f35f73c5 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go @@ -19,6 +19,10 @@ type fileCredentialSource struct { Format format } +func (cs fileCredentialSource) credentialSourceType() string { + return "file" +} + func (cs fileCredentialSource) subjectToken() (string, error) { tokenFile, err := os.Open(cs.File) if err != nil { diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go new file mode 100644 index 00000000..1d5aad2e --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go @@ -0,0 +1,64 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. 
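+ // e.g. version "go1.21rc1" comes out as "1.21.0-rc1".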
+ if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go index 16dca654..606bb4e8 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go @@ -23,6 +23,10 @@ type urlCredentialSource struct { ctx context.Context } +func (cs urlCredentialSource) credentialSourceType() string { + return "url" +} + func (cs urlCredentialSource) subjectToken() (string, error) { client := oauth2.NewClient(cs.ctx, nil) req, err := http.NewRequest("GET", cs.URL, nil) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go new file mode 100644 index 00000000..cb582070 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go @@ -0,0 +1,114 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccountauthorizeduser + +import ( + "context" + "errors" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +// now aliases time.Now for testing. +var now = func() time.Time { + return time.Now().UTC() +} + +var tokenValid = func(token oauth2.Token) bool { + return token.Valid() +} + +type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workforce pool and + // the provider identifier in that pool. + Audience string + // RefreshToken is the optional OAuth 2.0 refresh token. If specified, credentials can be refreshed. + RefreshToken string + // TokenURL is the optional STS token exchange endpoint for refresh. Must be specified for refresh, can be left as + // None if the token can not be refreshed. + TokenURL string + // TokenInfoURL is the optional STS endpoint URL for token introspection. + TokenInfoURL string + // ClientID is only required in conjunction with ClientSecret, as described above. + ClientID string + // ClientSecret is currently only required if token_info endpoint also needs to be called with the generated GCP + // access token. When provided, STS will be called with additional basic authentication using client_id as username + // and client_secret as password. + ClientSecret string + // Token is the OAuth2.0 access token. Can be nil if refresh information is provided. + Token string + // Expiry is the optional expiration datetime of the OAuth 2.0 access token. + Expiry time.Time + // RevokeURL is the optional STS endpoint URL for revoking tokens. + RevokeURL string + // QuotaProjectID is the optional project ID used for quota and billing. This project may be different from the + // project used to create the credentials. 
+ QuotaProjectID string + Scopes []string +} + +func (c *Config) canRefresh() bool { + return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" +} + +func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { + var token oauth2.Token + if c.Token != "" && !c.Expiry.IsZero() { + token = oauth2.Token{ + AccessToken: c.Token, + Expiry: c.Expiry, + TokenType: "Bearer", + } + } + if !tokenValid(token) && !c.canRefresh() { + return nil, errors.New("oauth2/google: Token should be created with fields to make it valid (`token` and `expiry`), or fields to allow it to refresh (`refresh_token`, `token_url`, `client_id`, `client_secret`).") + } + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + + return oauth2.ReuseTokenSource(&token, ts), nil +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + if !conf.canRefresh() { + return nil, errors.New("oauth2/google: The credentials do not contain the necessary fields need to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret.") + } + + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + + stsResponse, err := stsexchange.RefreshAccessToken(ts.ctx, conf.TokenURL, conf.RefreshToken, clientAuth, nil) + if err != nil { + return nil, err + } + if stsResponse.ExpiresIn < 0 { + return nil, errors.New("oauth2/google: got invalid expiry from security token service") + } + + if stsResponse.RefreshToken != "" { + conf.RefreshToken = stsResponse.RefreshToken + } + + token := &oauth2.Token{ + AccessToken: stsResponse.AccessToken, + Expiry: now().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), + TokenType: "Bearer", + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go similarity index 88% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go rename to vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go index 99987ce2..ebd520ea 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package stsexchange import ( "encoding/base64" @@ -12,8 +12,8 @@ import ( "golang.org/x/oauth2" ) -// clientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. -type clientAuthentication struct { +// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { // AuthStyle can be either basic or request-body AuthStyle oauth2.AuthStyle ClientID string @@ -23,7 +23,7 @@ type clientAuthentication struct { // InjectAuthentication is used to add authentication to a Secure Token Service exchange // request. It modifies either the passed url.Values or http.Header depending on the desired // authentication format. 
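// With AuthStyleInHeader the ClientID/ClientSecret pair becomes a basic
// Authorization header (RFC 6749 section 2.3.1); otherwise it is added
// to the request body form values.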
-func (c *clientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { +func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { return } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go similarity index 68% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go rename to vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go index e6fcae5f..1a0bebd1 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package stsexchange import ( "context" @@ -18,14 +18,17 @@ import ( "golang.org/x/oauth2" ) -// exchangeToken performs an oauth2 token exchange with the provided endpoint. +func defaultHeader() http.Header { + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + return header +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. // The first 4 fields are all mandatory. headers can be used to pass additional // headers beyond the bare minimum required by the token exchange. options can // be used to pass additional JSON-structured options to the remote server. -func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchangeRequest, authentication clientAuthentication, headers http.Header, options map[string]interface{}) (*stsTokenExchangeResponse, error) { - - client := oauth2.NewClient(ctx, nil) - +func ExchangeToken(ctx context.Context, endpoint string, request *TokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*Response, error) { data := url.Values{} data.Set("audience", request.Audience) data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange") @@ -41,13 +44,28 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan data.Set("options", string(opts)) } + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func RefreshAccessToken(ctx context.Context, endpoint string, refreshToken string, authentication ClientAuthentication, headers http.Header) (*Response, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", refreshToken) + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func makeRequest(ctx context.Context, endpoint string, data url.Values, authentication ClientAuthentication, headers http.Header) (*Response, error) { + if headers == nil { + headers = defaultHeader() + } + client := oauth2.NewClient(ctx, nil) authentication.InjectAuthentication(data, headers) encodedData := data.Encode() req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) - } req = req.WithContext(ctx) for key, list := range headers { @@ -71,7 +89,7 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) } - var 
stsResp stsTokenExchangeResponse + var stsResp Response err = json.Unmarshal(body, &stsResp) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) @@ -81,8 +99,8 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan return &stsResp, nil } -// stsTokenExchangeRequest contains fields necessary to make an oauth2 token exchange. -type stsTokenExchangeRequest struct { +// TokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type TokenExchangeRequest struct { ActingParty struct { ActorToken string ActorTokenType string @@ -96,8 +114,8 @@ type stsTokenExchangeRequest struct { SubjectTokenType string } -// stsTokenExchangeResponse is used to decode the remote server response during an oauth2 token exchange. -type stsTokenExchangeResponse struct { +// Response is used to decode the remote server response during an oauth2 token exchange. +type Response struct { AccessToken string `json:"access_token"` IssuedTokenType string `json:"issued_token_type"` TokenType string `json:"token_type"` diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go index e1755d1d..d28140f7 100644 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build appengine -// +build appengine package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go index c0ab196c..14989bea 100644 --- a/vendor/golang.org/x/oauth2/internal/oauth2.go +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -14,7 +14,7 @@ import ( // ParseKey converts the binary contents of a private key file // to an *rsa.PrivateKey. It detects whether the private key is in a -// PEM container or not. If so, it extracts the the private key +// PEM container or not. If so, it extracts the private key // from PEM container before conversion. It only supports PEM // containers with no passphrase. func ParseKey(key []byte) (*rsa.PrivateKey, error) { diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index b4723fca..e83ddeef 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -18,6 +18,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" ) @@ -55,12 +56,18 @@ type Token struct { } // tokenJSON is the struct representing the HTTP response from OAuth2 -// providers returning a token in JSON form. +// providers returning a token or error in JSON form. 
+// https://datatracker.ietf.org/doc/html/rfc6749#section-5.1 type tokenJSON struct { AccessToken string `json:"access_token"` TokenType string `json:"token_type"` RefreshToken string `json:"refresh_token"` ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number + // error fields + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + ErrorCode string `json:"error"` + ErrorDescription string `json:"error_description"` + ErrorURI string `json:"error_uri"` } func (e *tokenJSON) expiry() (t time.Time) { @@ -109,41 +116,60 @@ const ( AuthStyleInHeader AuthStyle = 2 ) -// authStyleCache is the set of tokenURLs we've successfully used via +// LazyAuthStyleCache is a backwards compatibility compromise to let Configs +// have a lazily-initialized AuthStyleCache. +// +// The two users of this, oauth2.Config and oauth2/clientcredentials.Config, +// both would ideally just embed an unexported AuthStyleCache but because both +// were historically allowed to be copied by value we can't retroactively add an +// uncopyable Mutex to them. +// +// We could use an atomic.Pointer, but that was added recently enough (in Go +// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03 +// still pass. By using an atomic.Value, it supports both Go 1.17 and +// copying by value, even if that's not ideal. +type LazyAuthStyleCache struct { + v atomic.Value // of *AuthStyleCache +} + +func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { + if c, ok := lc.v.Load().(*AuthStyleCache); ok { + return c + } + c := new(AuthStyleCache) + if !lc.v.CompareAndSwap(nil, c) { + c = lc.v.Load().(*AuthStyleCache) + } + return c +} + +// AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that // the set of OAuth2 servers a program contacts over time is fixed and // small. -var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. -func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil +type AuthStyleCache struct { + mu sync.Mutex + m map[string]AuthStyle // keyed by tokenURL } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. -func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] +func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + style, ok = c.m[tokenURL] return } // setAuthStyle adds an entry to authStyleCache, documented above. 
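// (When the auth style is unknown, RetrieveToken probes header-style
// auth first, then request-body auth, and records the winner here.)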
-func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) +func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[string]AuthStyle) } - authStyleCache.m[tokenURL] = v + c.m[tokenURL] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -183,10 +209,10 @@ func cloneURLValues(v url.Values) url.Values { return v2 } -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { needsAuthStyleProbe := authStyle == 0 if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -216,7 +242,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. @@ -236,21 +262,29 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { if err != nil { return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) } - if code := r.StatusCode; code < 200 || code > 299 { - return nil, &RetrieveError{ - Response: r, - Body: body, - } + + failureStatus := r.StatusCode < 200 || r.StatusCode > 299 + retrieveError := &RetrieveError{ + Response: r, + Body: body, + // attempt to populate error detail below } var token *Token content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) switch content { case "application/x-www-form-urlencoded", "text/plain": + // some endpoints return a query string vals, err := url.ParseQuery(string(body)) if err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse response: %v", err) } + retrieveError.ErrorCode = vals.Get("error") + retrieveError.ErrorDescription = vals.Get("error_description") + retrieveError.ErrorURI = vals.Get("error_uri") token = &Token{ AccessToken: vals.Get("access_token"), TokenType: vals.Get("token_type"), @@ -265,8 +299,14 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { default: var tj tokenJSON if err = json.Unmarshal(body, &tj); err != nil { - return nil, err + if failureStatus { + return nil, retrieveError + } + return nil, fmt.Errorf("oauth2: cannot parse json: %v", err) } + retrieveError.ErrorCode = tj.ErrorCode + retrieveError.ErrorDescription = tj.ErrorDescription + retrieveError.ErrorURI = tj.ErrorURI token = &Token{ AccessToken: tj.AccessToken, TokenType: tj.TokenType, @@ -276,17 +316,37 @@ func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { } json.Unmarshal(body, &token.Raw) // no error checks for optional fields } + // according to spec, servers should respond status 400 in error case + // https://www.rfc-editor.org/rfc/rfc6749#section-5.2 + // but some unorthodox servers respond 200 in error case + if failureStatus || retrieveError.ErrorCode != "" { + return nil, retrieveError + } if token.AccessToken 
== "" { return nil, errors.New("oauth2: server response missing access_token") } return token, nil } +// mirrors oauth2.RetrieveError type RetrieveError struct { - Response *http.Response - Body []byte + Response *http.Response + Body []byte + ErrorCode string + ErrorDescription string + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 9085fabe..90a2c3d6 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -58,6 +58,10 @@ type Config struct { // Scope specifies optional requested permissions. Scopes []string + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // A TokenSource is anything that can return a token. @@ -71,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -139,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. -// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. +// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -162,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -207,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. 
See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { v := url.Values{ "grant_type": {"authorization_code"}, diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go new file mode 100644 index 00000000..50593b6d --- /dev/null +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package oauth2 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "net/url" +) + +const ( + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + codeVerifierKey = "code_verifier" +) + +// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. +// This follows recommendations in RFC 7636. +// +// A fresh verifier should be generated for each authorization. +// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccessToken). +func GenerateVerifier() string { + // "RECOMMENDED that the output of a suitable random number generator be + // used to create a 32-octet sequence. The octet sequence is then + // base64url-encoded to produce a 43-octet URL-safe string to use as the + // code verifier." + // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + data := make([]byte, 32) + if _, err := rand.Read(data); err != nil { + panic(err) + } + return base64.RawURLEncoding.EncodeToString(data) +} + +// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be +// passed to Config.Exchange or Config.DeviceAccessToken only. +func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. +func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. 
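The new pkce.go helpers compose into the usual flow: one fresh verifier per authorization, the S256 challenge in the consent URL, the verifier in the final exchange. A hedged end-to-end sketch (the provider URLs and the callback code are placeholders):

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID: "client-id",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example/authorize",
			TokenURL: "https://provider.example/token",
		},
	}

	// One fresh verifier per authorization; the challenge goes into the
	// authorization URL, the verifier into the code exchange.
	verifier := oauth2.GenerateVerifier()
	url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline, oauth2.S256ChallengeOption(verifier))
	fmt.Println("visit:", url)

	// After the user returns with ?code=...&state=... on the callback:
	code := "authorization-code-from-callback" // placeholder
	tok, err := conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
	_ = tok
	_ = err
}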
+func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 7c64006d..5bbb3321 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -164,7 +164,7 @@ func tokenFromInternal(t *internal.Token) *Token { // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // with an error.. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*RetrieveError)(rErr) @@ -175,14 +175,31 @@ func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) } // RetrieveError is the error returned when the token endpoint returns a -// non-2XX HTTP status code. +// non-2XX HTTP status code or populates RFC 6749's 'error' parameter. +// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 type RetrieveError struct { Response *http.Response // Body is the body that was consumed by reading Response.Body. // It may be truncated. Body []byte + // ErrorCode is RFC 6749's 'error' parameter. + ErrorCode string + // ErrorDescription is RFC 6749's 'error_description' parameter. + ErrorDescription string + // ErrorURI is RFC 6749's 'error_uri' parameter. + ErrorURI string } func (r *RetrieveError) Error() string { + if r.ErrorCode != "" { + s := fmt.Sprintf("oauth2: %q", r.ErrorCode) + if r.ErrorDescription != "" { + s += fmt.Sprintf(" %q", r.ErrorDescription) + } + if r.ErrorURI != "" { + s += fmt.Sprintf(" %q", r.ErrorURI) + } + return s + } return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) } diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index cbee7a4e..b18efb74 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -20,7 +20,7 @@ type token struct{} // A zero Group is valid, has no limit on the number of active goroutines, // and does not cancel on error. type Group struct { - cancel func() + cancel func(error) wg sync.WaitGroup @@ -43,7 +43,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. 
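Callers can now branch on the structured fields that both RetrieveError types gain in this diff, instead of string-matching the Error() output. A small sketch:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/oauth2"
)

// inspectTokenError demonstrates the new structured fields: ErrorCode,
// ErrorDescription, and ErrorURI are populated from the RFC 6749 payload.
func inspectTokenError(err error) {
	var rErr *oauth2.RetrieveError
	if errors.As(err, &rErr) {
		fmt.Println("code:", rErr.ErrorCode)
		fmt.Println("description:", rErr.ErrorDescription)
		fmt.Println("uri:", rErr.ErrorURI)
		return
	}
	fmt.Println("other error:", err)
}

func main() {
	inspectTokenError(errors.New("network down"))
}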
func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := withCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -52,7 +52,7 @@ func WithContext(ctx context.Context) (*Group, context.Context) { func (g *Group) Wait() error { g.wg.Wait() if g.cancel != nil { - g.cancel() + g.cancel(g.err) } return g.err } @@ -76,7 +76,7 @@ func (g *Group) Go(f func() error) { g.errOnce.Do(func() { g.err = err if g.cancel != nil { - g.cancel() + g.cancel(g.err) } }) } @@ -105,7 +105,7 @@ func (g *Group) TryGo(f func() error) bool { g.errOnce.Do(func() { g.err = err if g.cancel != nil { - g.cancel() + g.cancel(g.err) } }) } diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go new file mode 100644 index 00000000..7d419d37 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/go120.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 +// +build go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + return context.WithCancelCause(parent) +} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go new file mode 100644 index 00000000..1795c18a --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/pre_go120.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.20 +// +build !go1.20 + +package errgroup + +import "context" + +func withCancelCause(parent context.Context) (context.Context, func(error)) { + ctx, cancel := context.WithCancel(parent) + return ctx, func(error) { cancel() } +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index bd6c128a..ff7da60e 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -7,6 +7,6 @@ package cpu -const cacheLineSize = 32 +const cacheLineSize = 64 func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go index 1d9d91f3..34e49f95 100644 --- a/vendor/golang.org/x/sys/cpu/hwcap_linux.go +++ b/vendor/golang.org/x/sys/cpu/hwcap_linux.go @@ -5,7 +5,7 @@ package cpu import ( - "io/ioutil" + "os" ) const ( @@ -39,7 +39,7 @@ func readHWCAP() error { return nil } - buf, err := ioutil.ReadFile(procAuxv) + buf, err := os.ReadFile(procAuxv) if err != nil { // e.g. on android /proc/self/auxv is not accessible, so silently // ignore the error and leave Initialized = false. On some diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go new file mode 100644 index 00000000..3bf40fdf --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs.go @@ -0,0 +1,102 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package execabs is a drop-in replacement for os/exec +// that requires PATH lookups to find absolute paths. 
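Because Wait, Go, and TryGo now cancel with the recorded error, the group context's cancellation cause carries the first failure on Go 1.20+ (the pre_go120.go fallback below discards it). A sketch assuming Go 1.20+:

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	boom := errors.New("boom")

	g.Go(func() error { return boom })
	g.Go(func() error {
		<-ctx.Done()
		// With this change the group records the first error as the
		// cancellation cause instead of a bare context.Canceled.
		fmt.Println("cause:", context.Cause(ctx))
		return ctx.Err()
	})

	fmt.Println("wait:", g.Wait()) // wait: boom
}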
+// That is, execabs.Command("cmd") runs the same PATH lookup +// as exec.Command("cmd"), but if the result is a path +// which is relative, the Run and Start methods will report +// an error instead of running the executable. +// +// See https://blog.golang.org/path-security for more information +// about when it may be necessary or appropriate to use this package. +package execabs + +import ( + "context" + "fmt" + "os/exec" + "path/filepath" + "reflect" + "unsafe" +) + +// ErrNotFound is the error resulting if a path search failed to find an executable file. +// It is an alias for exec.ErrNotFound. +var ErrNotFound = exec.ErrNotFound + +// Cmd represents an external command being prepared or run. +// It is an alias for exec.Cmd. +type Cmd = exec.Cmd + +// Error is returned by LookPath when it fails to classify a file as an executable. +// It is an alias for exec.Error. +type Error = exec.Error + +// An ExitError reports an unsuccessful exit by a command. +// It is an alias for exec.ExitError. +type ExitError = exec.ExitError + +func relError(file, path string) error { + return fmt.Errorf("%s resolves to executable in current directory (.%c%s)", file, filepath.Separator, path) +} + +// LookPath searches for an executable named file in the directories +// named by the PATH environment variable. If file contains a slash, +// it is tried directly and the PATH is not consulted. The result will be +// an absolute path. +// +// LookPath differs from exec.LookPath in its handling of PATH lookups, +// which are used for file names without slashes. If exec.LookPath's +// PATH lookup would have returned an executable from the current directory, +// LookPath instead returns an error. +func LookPath(file string) (string, error) { + path, err := exec.LookPath(file) + if err != nil && !isGo119ErrDot(err) { + return "", err + } + if filepath.Base(file) == file && !filepath.IsAbs(path) { + return "", relError(file, path) + } + return path, nil +} + +func fixCmd(name string, cmd *exec.Cmd) { + if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { + // exec.Command was called with a bare binary name and + // exec.LookPath returned a path which is not absolute. + // Set cmd.lookPathErr and clear cmd.Path so that it + // cannot be run. + lookPathErr := (*error)(unsafe.Pointer(reflect.ValueOf(cmd).Elem().FieldByName("lookPathErr").Addr().Pointer())) + if *lookPathErr == nil { + *lookPathErr = relError(name, cmd.Path) + } + cmd.Path = "" + } +} + +// CommandContext is like Command but includes a context. +// +// The provided context is used to kill the process (by calling os.Process.Kill) +// if the context becomes done before the command completes on its own. +func CommandContext(ctx context.Context, name string, arg ...string) *exec.Cmd { + cmd := exec.CommandContext(ctx, name, arg...) + fixCmd(name, cmd) + return cmd + +} + +// Command returns the Cmd struct to execute the named program with the given arguments. +// See exec.Command for most details. +// +// Command differs from exec.Command in its handling of PATH lookups, +// which are used when the program name contains no slashes. +// If exec.Command would have returned an exec.Cmd configured to run an +// executable from the current directory, Command instead +// returns an exec.Cmd that will return an error from Start or Run. +func Command(name string, arg ...string) *exec.Cmd { + cmd := exec.Command(name, arg...) 
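A brief usage sketch of the vendored execabs package above; "sometool" is a hypothetical binary name:

package main

import (
	"fmt"

	"golang.org/x/sys/execabs"
)

func main() {
	// With execabs, a bare name that resolves to an executable in the
	// current directory is rejected instead of silently executed.
	path, err := execabs.LookPath("sometool")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("resolved to absolute path:", path)

	cmd := execabs.Command("sometool", "--version")
	if err := cmd.Run(); err != nil {
		fmt.Println("run failed:", err)
	}
}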
+ fixCmd(name, cmd) + return cmd +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go new file mode 100644 index 00000000..2000064a --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -0,0 +1,18 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.19 +// +build !go1.19 + +package execabs + +import "os/exec" + +func isGo119ErrDot(err error) bool { + return false +} + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return false +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go new file mode 100644 index 00000000..f364b341 --- /dev/null +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -0,0 +1,21 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package execabs + +import ( + "errors" + "os/exec" +) + +func isGo119ErrDot(err error) bool { + return errors.Is(err, exec.ErrDot) +} + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return cmd.Err != nil +} diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go deleted file mode 100644 index e07899b9..00000000 --- a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package unsafeheader contains header declarations for the Go runtime's -// slice and string implementations. -// -// This package allows x/sys to use types equivalent to -// reflect.SliceHeader and reflect.StringHeader without introducing -// a dependency on the (relatively heavy) "reflect" package. -package unsafeheader - -import ( - "unsafe" -) - -// Slice is the runtime representation of a slice. -// It cannot be used safely or portably and its representation may change in a later release. -type Slice struct { - Data unsafe.Pointer - Len int - Cap int -} - -// String is the runtime representation of a string. -// It cannot be used safely or portably and its representation may change in a later release. 
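The go119 build file above leans on behavior that landed in the standard library itself: since Go 1.19, os/exec marks lookups that resolve into the current directory with exec.ErrDot. A sketch of detecting that condition directly (hypothetical binary name, Go 1.19+):

package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	// On Go 1.19+, plain os/exec already reports executables found in the
	// current directory via exec.ErrDot; the execabs_go119.go helpers
	// simply detect that so execabs can keep a single code path.
	cmd := exec.Command("sometool")
	if errors.Is(cmd.Err, exec.ErrDot) {
		fmt.Println("refusing to run executable from current directory")
		return
	}
	if err := cmd.Run(); err != nil {
		fmt.Println(err)
	}
}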
-type String struct { - Data unsafe.Pointer - Len int -} diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 39dba6ca..463c3eff 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -7,12 +7,6 @@ package unix -import "unsafe" - func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ptrace1(request, pid, addr, data) } - -func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error { - return ptrace1Ptr(request, pid, addr, data) -} diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index 9ea66330..ed0509a0 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -7,12 +7,6 @@ package unix -import "unsafe" - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { return ENOTSUP } - -func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { - return ENOTSUP -} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 9a6e5aca..e94e6cda 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -487,8 +487,6 @@ func Fsync(fd int) error { //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys write(fd int, p []byte) (n int, err error) -//sys readlen(fd int, p *byte, np int) (n int, err error) = read -//sys writelen(fd int, p *byte, np int) (n int, err error) = write //sys Dup2(oldfd int, newfd int) (err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 135cc3cd..59542a89 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -644,189 +644,3 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE - -/* - * Unimplemented - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Ioctl -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// 
Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// sendfile -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Poll_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 9fa87980..b37310ce 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -47,6 +47,5 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace -//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index f17b8c52..d51ec996 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -47,6 +47,5 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace -//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) 
(err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index d4ce988e..97cb916f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -343,203 +343,5 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - * TODO(jsing): Update this list for DragonFly. - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Mount -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Setattrlist -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Getxattr -// Fgetxattr -// Setxattr -// Fsetxattr -// Removexattr -// Fremovexattr -// Listxattr -// Flistxattr -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// 
__mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index afb10106..64d1bb4d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -449,197 +449,5 @@ func Dup3(oldfd, newfd, flags int) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Ioctl -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Mount -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Setattrlist -// Getdents -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// Stat64_extended -// 
Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Poll_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 0ba03019..fb4e5022 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -693,10 +693,10 @@ type SockaddrALG struct { func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) { // Leave room for NUL byte terminator. - if len(sa.Type) > 13 { + if len(sa.Type) > len(sa.raw.Type)-1 { return nil, 0, EINVAL } - if len(sa.Name) > 63 { + if len(sa.Name) > len(sa.raw.Name)-1 { return nil, 0, EINVAL } @@ -704,17 +704,8 @@ func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Feat = sa.Feature sa.raw.Mask = sa.Mask - typ, err := ByteSliceFromString(sa.Type) - if err != nil { - return nil, 0, err - } - name, err := ByteSliceFromString(sa.Name) - if err != nil { - return nil, 0, err - } - - copy(sa.raw.Type[:], typ) - copy(sa.raw.Name[:], name) + copy(sa.raw.Type[:], sa.Type) + copy(sa.raw.Name[:], sa.Name) return unsafe.Pointer(&sa.raw), SizeofSockaddrALG, nil } @@ -1988,8 +1979,6 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys Unshare(flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys exitThread(code int) (err error) = SYS_EXIT -//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ -//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE //sys readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV //sys writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV //sys preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV @@ -2493,99 +2482,3 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { } return attr, nil } - -/* - * Unimplemented - */ -// AfsSyscall -// ArchPrctl -// Brk -// ClockNanosleep -// ClockSettime -// Clone -// EpollCtlOld -// EpollPwait -// EpollWaitOld -// Execve -// Fork -// Futex -// GetKernelSyms -// GetMempolicy -// GetRobustList -// GetThreadArea -// Getpmsg -// IoCancel -// IoDestroy -// IoGetevents -// IoSetup -// IoSubmit -// IoprioGet -// IoprioSet -// KexecLoad -// LookupDcookie -// Mbind -// MigratePages -// Mincore -// ModifyLdt -// Mount -// MovePages -// MqGetsetattr -// MqNotify -// MqOpen -// MqTimedreceive -// MqTimedsend -// MqUnlink -// 
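The SockaddrALG change earlier in this hunk derives the Type/Name limits from the raw struct's field sizes instead of the hard-coded 13 and 63, and copies the strings directly. A hedged AF_ALG usage sketch (Linux only):

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Bind an AF_ALG socket for the kernel SHA-256 transform. With the
	// generalized bounds check, "sha256" (6 bytes) must simply fit in the
	// raw Name array with room for the NUL terminator.
	fd, err := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer unix.Close(fd)

	sa := &unix.SockaddrALG{Type: "hash", Name: "sha256"}
	if err := unix.Bind(fd, sa); err != nil {
		fmt.Println(err)
	}
}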
Msgctl -// Msgget -// Msgrcv -// Msgsnd -// Nfsservctl -// Personality -// Pselect6 -// Ptrace -// Putpmsg -// Quotactl -// Readahead -// Readv -// RemapFilePages -// RestartSyscall -// RtSigaction -// RtSigpending -// RtSigqueueinfo -// RtSigreturn -// RtSigsuspend -// RtSigtimedwait -// SchedGetPriorityMax -// SchedGetPriorityMin -// SchedGetparam -// SchedGetscheduler -// SchedRrGetInterval -// SchedSetparam -// SchedYield -// Security -// Semctl -// Semget -// Semop -// Semtimedop -// SetMempolicy -// SetRobustList -// SetThreadArea -// SetTidAddress -// Sigaltstack -// Swapoff -// Swapon -// Sysfs -// TimerCreate -// TimerDelete -// TimerGetoverrun -// TimerGettime -// TimerSettime -// Tkill (obsolete) -// Tuxcall -// Umount2 -// Uselib -// Utimensat -// Vfork -// Vhangup -// Vserver -// _Sysctl diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index ddd1ac85..88162099 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -356,8 +356,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) const ( @@ -371,262 +369,3 @@ const ( func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (uintptr, error) { return mremapNetBSD(oldaddr, oldlength, newaddr, newlength, flags) } - -/* - * Unimplemented - */ -// ____semctl13 -// __clone -// __fhopen40 -// __fhstat40 -// __fhstatvfs140 -// __fstat30 -// __getcwd -// __getfh30 -// __getlogin -// __lstat30 -// __mount50 -// __msgctl13 -// __msync13 -// __ntp_gettime30 -// __posix_chown -// __posix_fchown -// __posix_lchown -// __posix_rename -// __setlogin -// __shmctl13 -// __sigaction_sigtramp -// __sigaltstack14 -// __sigpending14 -// __sigprocmask14 -// __sigsuspend14 -// __sigtimedwait -// __stat30 -// __syscall -// __vfork14 -// _ksem_close -// _ksem_destroy -// _ksem_getvalue -// _ksem_init -// _ksem_open -// _ksem_post -// _ksem_trywait -// _ksem_unlink -// _ksem_wait -// _lwp_continue -// _lwp_create -// _lwp_ctl -// _lwp_detach -// _lwp_exit -// _lwp_getname -// _lwp_getprivate -// _lwp_kill -// _lwp_park -// _lwp_self -// _lwp_setname -// _lwp_setprivate -// _lwp_suspend -// _lwp_unpark -// _lwp_unpark_all -// _lwp_wait -// _lwp_wakeup -// _pset_bind -// _sched_getaffinity -// _sched_getparam -// _sched_setaffinity -// _sched_setparam -// acct -// aio_cancel -// aio_error -// aio_fsync -// aio_read -// aio_return -// aio_suspend -// aio_write -// break -// clock_getres -// clock_gettime -// clock_settime -// compat_09_ogetdomainname -// compat_09_osetdomainname -// compat_09_ouname -// compat_10_omsgsys -// compat_10_osemsys -// compat_10_oshmsys -// compat_12_fstat12 -// compat_12_getdirentries -// compat_12_lstat12 -// compat_12_msync -// compat_12_oreboot -// compat_12_oswapon -// compat_12_stat12 -// compat_13_sigaction13 -// compat_13_sigaltstack13 -// compat_13_sigpending13 -// compat_13_sigprocmask13 -// compat_13_sigreturn13 -// compat_13_sigsuspend13 -// compat_14___semctl -// compat_14_msgctl -// compat_14_shmctl -// 
compat_16___sigaction14 -// compat_16___sigreturn14 -// compat_20_fhstatfs -// compat_20_fstatfs -// compat_20_getfsstat -// compat_20_statfs -// compat_30___fhstat30 -// compat_30___fstat13 -// compat_30___lstat13 -// compat_30___stat13 -// compat_30_fhopen -// compat_30_fhstat -// compat_30_fhstatvfs1 -// compat_30_getdents -// compat_30_getfh -// compat_30_ntp_gettime -// compat_30_socket -// compat_40_mount -// compat_43_fstat43 -// compat_43_lstat43 -// compat_43_oaccept -// compat_43_ocreat -// compat_43_oftruncate -// compat_43_ogetdirentries -// compat_43_ogetdtablesize -// compat_43_ogethostid -// compat_43_ogethostname -// compat_43_ogetkerninfo -// compat_43_ogetpagesize -// compat_43_ogetpeername -// compat_43_ogetrlimit -// compat_43_ogetsockname -// compat_43_okillpg -// compat_43_olseek -// compat_43_ommap -// compat_43_oquota -// compat_43_orecv -// compat_43_orecvfrom -// compat_43_orecvmsg -// compat_43_osend -// compat_43_osendmsg -// compat_43_osethostid -// compat_43_osethostname -// compat_43_osigblock -// compat_43_osigsetmask -// compat_43_osigstack -// compat_43_osigvec -// compat_43_otruncate -// compat_43_owait -// compat_43_stat43 -// execve -// extattr_delete_fd -// extattr_delete_file -// extattr_delete_link -// extattr_get_fd -// extattr_get_file -// extattr_get_link -// extattr_list_fd -// extattr_list_file -// extattr_list_link -// extattr_set_fd -// extattr_set_file -// extattr_set_link -// extattrctl -// fchroot -// fdatasync -// fgetxattr -// fktrace -// flistxattr -// fork -// fremovexattr -// fsetxattr -// fstatvfs1 -// fsync_range -// getcontext -// getitimer -// getvfsstat -// getxattr -// ktrace -// lchflags -// lchmod -// lfs_bmapv -// lfs_markv -// lfs_segclean -// lfs_segwait -// lgetxattr -// lio_listio -// listxattr -// llistxattr -// lremovexattr -// lseek -// lsetxattr -// lutimes -// madvise -// mincore -// minherit -// modctl -// mq_close -// mq_getattr -// mq_notify -// mq_open -// mq_receive -// mq_send -// mq_setattr -// mq_timedreceive -// mq_timedsend -// mq_unlink -// msgget -// msgrcv -// msgsnd -// nfssvc -// ntp_adjtime -// pmc_control -// pmc_get_info -// pollts -// preadv -// profil -// pselect -// pset_assign -// pset_create -// pset_destroy -// ptrace -// pwritev -// quotactl -// rasctl -// readv -// reboot -// removexattr -// sa_enable -// sa_preempt -// sa_register -// sa_setconcurrency -// sa_stacks -// sa_yield -// sbrk -// sched_yield -// semconfig -// semget -// semop -// setcontext -// setitimer -// setxattr -// shmat -// shmdt -// shmget -// sstk -// statvfs1 -// swapctl -// sysarch -// syscall -// timer_create -// timer_delete -// timer_getoverrun -// timer_gettime -// timer_settime -// undelete -// utrace -// uuidgen -// vadvise -// vfork -// writev diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index c5f166a1..6f34479b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -326,78 +326,4 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - */ -// 
__getcwd -// __semctl -// __syscall -// __sysctl -// adjfreq -// break -// clock_getres -// clock_gettime -// clock_settime -// closefrom -// execve -// fhopen -// fhstat -// fhstatfs -// fork -// futimens -// getfh -// getgid -// getitimer -// getlogin -// getthrid -// ktrace -// lfs_bmapv -// lfs_markv -// lfs_segclean -// lfs_segwait -// mincore -// minherit -// mount -// mquery -// msgctl -// msgget -// msgrcv -// msgsnd -// nfssvc -// nnpfspioctl -// preadv -// profil -// pwritev -// quotactl -// readv -// reboot -// renameat -// rfork -// sched_yield -// semget -// semop -// setgroups -// setitimer -// setsockopt -// shmat -// shmctl -// shmdt -// shmget -// sigaction -// sigaltstack -// sigpending -// sigprocmask -// sigreturn -// sigsuspend -// sysarch -// syscall -// threxit -// thrsigdivert -// thrsleep -// thrwakeup -// vfork -// writev diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 72d23575..b99cfa13 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -698,24 +698,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) - n = int(r0) - if e1 != 0 { - err = e1 - } - return -} - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) - n = int(r0) - if e1 != 0 { - err = e1 - } - return -} - // Event Ports type fileObjCookie struct { diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 44e72edb..4596d041 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -192,7 +192,6 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys read(fd int, p []byte) (n int, err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys write(fd int, p []byte) (n int, err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 0787a043..f9c7f479 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2421,6 +2421,15 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_RISCV_V_GET_CONTROL = 0x46 + PR_RISCV_V_SET_CONTROL = 0x45 + PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 + PR_RISCV_V_VSTATE_CTRL_DEFAULT = 0x0 + PR_RISCV_V_VSTATE_CTRL_INHERIT = 0x10 + PR_RISCV_V_VSTATE_CTRL_MASK = 0x1f + PR_RISCV_V_VSTATE_CTRL_NEXT_MASK = 0xc + PR_RISCV_V_VSTATE_CTRL_OFF = 0x1 + PR_RISCV_V_VSTATE_CTRL_ON = 0x2 PR_SCHED_CORE = 0x3e PR_SCHED_CORE_CREATE = 0x1 PR_SCHED_CORE_GET = 0x0 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 
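The PR_RISCV_V_* constants added to zerrors_linux.go expose the RISC-V vector-state prctl interface. A speculative probe sketch; it assumes a riscv64 kernel new enough (roughly 6.5+) to implement these options:

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical probe of the new RISC-V vector-state control knobs via
	// prctl(2); on non-riscv64 kernels this is expected to fail with EINVAL.
	ctl, err := unix.PrctlRetInt(unix.PR_RISCV_V_GET_CONTROL, 0, 0, 0, 0)
	if err != nil {
		fmt.Println("prctl:", err)
		return
	}
	fmt.Printf("vstate control: %#x (cur mask %#x)\n", ctl, unix.PR_RISCV_V_VSTATE_CTRL_CUR_MASK)
}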
cfb14300..30aee00a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index df64f2d5..8ebfa512 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -327,10 +327,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3025cd5b..271a21cd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -333,10 +333,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 09e1ffbe..910c330a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -323,10 +323,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index a4572354..a640798c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -118,6 +118,8 @@ const ( IUCLC = 0x200 IXOFF = 0x1000 IXON = 0x400 + LASX_CTX_MAGIC = 0x41535801 + LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 @@ -317,10 +319,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index fee7dfb8..0d5925d3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index a5b2373a..d72a00e0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ 
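SO_PASSPIDFD and SO_PEERPIDFD, added across the zerrors files here, are new socket options (Linux 6.5+); the latter yields a pidfd for the peer of a connected AF_UNIX socket. A hedged sketch using the existing GetsockoptInt helper:

//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// peerPidfd sketches use of the new SO_PEERPIDFD option: given a connected
// unix-domain socket fd, it returns a pidfd referring to the peer process.
func peerPidfd(fd int) (int, error) {
	return unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_PEERPIDFD)
}

func main() {
	fmt.Println("see peerPidfd; requires a connected AF_UNIX socket fd on Linux 6.5+")
}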
-326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 5dde82c9..02ba129f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 2e80ea6b..8daa6dd9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index a65dcd7c..63c8fa2f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -381,10 +381,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index cbd34e3d..930799ec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -385,10 +385,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e4afa7a3..8605a7dd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -385,10 +385,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 44f45a03..95a016f1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -314,10 +314,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 74733e26..1ae0108f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -389,10 +389,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f5f3934b..1bb7c633 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -428,10 +428,12 @@ const ( SO_NOFCS = 0x27 SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 + SO_PASSPIDFD = 0x55 SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 SO_PEERGROUPS = 0x3d + SO_PEERPIDFD = 0x56 SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x48 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 9a257219..d1d1d233 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -817,28 +817,6 @@ func write(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) - n = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) - n = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Dup2(oldfd int, newfd int) (err error) { r0, er := C.dup2(C.int(oldfd), C.int(newfd)) if r0 == -1 && er != nil { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 6de80c20..f99a18ad 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -762,28 +762,6 @@ func write(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, e1 := callread(fd, uintptr(unsafe.Pointer(p)), np) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, e1 := callwrite(fd, uintptr(unsafe.Pointer(p)), np) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Dup2(oldfd int, newfd int) (err error) { _, e1 := calldup2(oldfd, newfd) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 4037ccf7..1cad561e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -725,6 +725,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + 
+//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -733,10 +739,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2410,28 +2412,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2521,14 +2501,6 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { return } -func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 4baaed0b..8b8bb284 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -5,703 +5,586 @@ TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) - GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, 
$libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA 
·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) - GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) - GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) - GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) - GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) - GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) - GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) - GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) - GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) - GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) - GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) - GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT 
libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) - GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) - GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmat(SB) - GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmctl(SB) - GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmdt(SB) - GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmget(SB) - GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) - GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) - GLOBL 
·libc_clonefile_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) - GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) - GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) - GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) 
- GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) - GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT 
libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mount(SB) - GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, 
$8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) @@ -712,192 +595,160 @@ DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) - GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, 
$libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) - GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) - GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat64_trampoline_addr(SB)/8, $libc_fstat64_trampoline<>(SB) TEXT libc_fstatat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) - GLOBL ·libc_fstatat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat64_trampoline_addr(SB)/8, $libc_fstatat64_trampoline<>(SB) TEXT libc_fstatfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) - GLOBL ·libc_fstatfs64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs64_trampoline_addr(SB)/8, $libc_fstatfs64_trampoline<>(SB) TEXT libc_getfsstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) - GLOBL ·libc_getfsstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_getfsstat64_trampoline_addr(SB)/8, $libc_getfsstat64_trampoline<>(SB) TEXT libc_lstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) - GLOBL ·libc_lstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat64_trampoline_addr(SB)/8, $libc_lstat64_trampoline<>(SB) TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) - GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) TEXT libc_stat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) - GLOBL ·libc_stat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat64_trampoline_addr(SB)/8, $libc_stat64_trampoline<>(SB) TEXT libc_statfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs64(SB) - GLOBL ·libc_statfs64_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs64_trampoline_addr(SB)/8, $libc_statfs64_trampoline<>(SB) 
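The hunks in this update fall into two themes: the Linux zerrors tables gain the SO_PASSPIDFD and SO_PEERPIDFD socket options, and the generated zsyscall files drop the unexported readlen/writelen (and ptrace1Ptr) shims while reshuffling the darwin ioctl trampoline declarations. Below is a hypothetical usage sketch, not part of the diff, showing the new SO_PEERPIDFD constant through the existing unix.GetsockoptInt helper; it assumes Linux 6.5+ and this updated golang.org/x/sys, and the socketpair setup and helper name are illustrative only.

//go:build linux

// Hypothetical sketch: fetching a pidfd for the peer of a connected
// AF_UNIX socket via the SO_PEERPIDFD option added in this update.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// peerPidfd queries SO_PEERPIDFD at SOL_SOCKET level, analogous to the
// older SO_PEERCRED, and returns a pidfd referring to the peer process.
func peerPidfd(connFd int) (int, error) {
	pidfd, err := unix.GetsockoptInt(connFd, unix.SOL_SOCKET, unix.SO_PEERPIDFD)
	if err != nil {
		return -1, fmt.Errorf("getsockopt(SO_PEERPIDFD): %w", err)
	}
	return pidfd, nil
}

func main() {
	// A socketpair stands in for a real listener/dialer pair.
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	pidfd, err := peerPidfd(fds[0])
	if err != nil {
		// Expected to fail on kernels older than 6.5.
		fmt.Println("SO_PEERPIDFD unavailable:", err)
		return
	}
	defer unix.Close(pidfd)
	fmt.Println("peer pidfd:", pidfd)
}

The removed readlen/writelen helpers were package-internal wrappers over raw byte pointers, so no exported API changes here; the slice-based unix.Read and unix.Write cover the same ground for callers.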
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 51d6f3fb..b18edbd0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -725,6 +725,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) {
 	return
 }
 
+var libc_ioctl_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
 	_, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg))
 	if e1 != 0 {
@@ -733,10 +739,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) {
 	return
 }
 
-var libc_ioctl_trampoline_addr uintptr
-
-//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib"
-
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) {
@@ -2410,28 +2412,6 @@ var libc_munmap_trampoline_addr uintptr
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func readlen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
-func writelen(fd int, buf *byte, nbuf int) (n int, err error) {
-	r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf))
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Fstat(fd int, stat *Stat_t) (err error) {
 	_, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
@@ -2521,14 +2501,6 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) {
 	return
 }
 
-func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) {
-	_, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
 var libc_ptrace_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index c3b82c03..08362c1a 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -5,703 +5,586 @@
 TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fdopendir(SB)
-
 GLOBL	·libc_fdopendir_trampoline_addr(SB), RODATA, $8
 DATA	·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB)
 
 TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_getgroups(SB)
-
 GLOBL	·libc_getgroups_trampoline_addr(SB), RODATA, $8
 DATA	·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB)
 
 TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_setgroups(SB)
-
 GLOBL	·libc_setgroups_trampoline_addr(SB), RODATA, $8
 DATA	·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB)
 
 TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_wait4(SB)
-
 GLOBL	·libc_wait4_trampoline_addr(SB), RODATA, $8
 DATA
·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), 
RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) - GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) - GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) - GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) - GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) - GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) - GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) - GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) - GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) - GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) - GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_flistxattr(SB) - GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) - GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) - GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmat(SB) - GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmctl(SB) - GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmdt(SB) - GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmget(SB) - GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) - GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 DATA 
·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) - GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) - GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) - GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) - GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), 
RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) - GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP 
libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mount(SB) - GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, 
$libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) @@ -712,192 +595,160 @@ DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) - GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 
JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) - GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) - GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) - GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 DATA 
·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 0eabac7a..0c67df64 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1642,28 +1642,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index ee313eb0..e6e05d14 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 4c986e44..7508acca 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := 
Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 55521694..7b56aead 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 67a226fb..cc623dca 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index f0b9ddaa..58184919 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if 
e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index b57c7050..6be25cd1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -40,7 +40,7 @@ func readv(fd int, iovs []Iovec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procreadv)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -55,7 +55,7 @@ func preadv(fd int, iovs []Iovec, off int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpreadv)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -70,7 +70,7 @@ func writev(fd int, iovs []Iovec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwritev)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -85,7 +85,7 @@ func pwritev(fd int, iovs []Iovec, off int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwritev)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -96,7 +96,7 @@ func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept4)), 4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 14ab34a5..1ff3aec7 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1734,28 +1734,6 @@ func exitThread(code int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func readv(fd int, iovs []Iovec) (n int, err error) { var _p0 unsafe.Pointer if len(iovs) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go 
b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 35f499b3..2df3c5ba 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 3cda65b0..a60556ba 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 1e1fea90..9f788917 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 3b77da11..82a4cb2d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9ab9abf7..66b3b645 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 915761ea..c5c4cc11 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2213,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 8e87fdf1..93bfbb32 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 12a7a216..a107b8fd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index b19e8aa0..c427de50 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index fb99594c..60c1a99a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg 
unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 32cbbbc5..52eba360 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 609d1c59..b4018946 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -436,7 +436,7 @@ func pipe(p *[2]_C_int) (n int, err error) { r0, _, e1 := 
rawSysvicall6(uintptr(unsafe.Pointer(&procpipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -446,7 +446,7 @@ func pipe(p *[2]_C_int) (n int, err error) { func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe2)), 2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -456,7 +456,7 @@ func pipe2(p *[2]_C_int, flags int) (err error) { func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -471,7 +471,7 @@ func Getcwd(buf []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetcwd)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -482,7 +482,7 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -492,7 +492,7 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { func setgroups(ngid int, gid *_Gid_t) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procsetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -503,7 +503,7 @@ func wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(statusp)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int32(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -518,7 +518,7 @@ func gethostname(buf []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -533,7 +533,7 @@ func utimes(path string, times *[2]Timeval) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -548,7 +548,7 @@ func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimensat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -559,7 +559,7 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -569,7 +569,7 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -580,7 +580,7 @@ func accept(s int, rsa 
*RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -591,7 +591,7 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -602,7 +602,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -612,7 +612,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { func acct(path *byte) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -647,7 +647,7 @@ func ioctlRet(fd int, req int, arg uintptr) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -658,7 +658,7 @@ func ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -669,7 +669,7 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -684,7 +684,7 @@ func Access(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAccess)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -694,7 +694,7 @@ func Access(path string, mode uint32) (err error) { func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAdjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -709,7 +709,7 @@ func Chdir(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -724,7 +724,7 @@ func Chmod(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChmod)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -739,7 +739,7 @@ func Chown(path string, uid int, gid int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -754,7 +754,7 @@ func Chroot(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChroot)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -764,7 +764,7 
@@ func Chroot(path string) (err error) { func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClockGettime)), 2, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -774,7 +774,7 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -790,7 +790,7 @@ func Creat(path string, mode uint32) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procCreat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -801,7 +801,7 @@ func Dup(fd int) (nfd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0) nfd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -811,7 +811,7 @@ func Dup(fd int) (nfd int, err error) { func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -833,7 +833,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFaccessat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -843,7 +843,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { func Fchdir(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -853,7 +853,7 @@ func Fchdir(fd int) (err error) { func Fchmod(fd int, mode uint32) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmod)), 2, uintptr(fd), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -868,7 +868,7 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -878,7 +878,7 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -893,7 +893,7 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -903,7 +903,7 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { func Fdatasync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -913,7 +913,7 @@ func Fdatasync(fd int) (err error) { func Flock(fd int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFlock)), 2, 
uintptr(fd), uintptr(how), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -924,7 +924,7 @@ func Fpathconf(fd int, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -934,7 +934,7 @@ func Fpathconf(fd int, name int) (val int, err error) { func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -949,7 +949,7 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -959,7 +959,7 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -974,7 +974,7 @@ func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetdents)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1001,7 +1001,7 @@ func Getpgid(pid int) (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) pgid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1012,7 +1012,7 @@ func Getpgrp() (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0) pgid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1047,7 +1047,7 @@ func Getpriority(which int, who int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1057,7 +1057,7 @@ func Getpriority(which int, who int) (n int, err error) { func Getrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1067,7 +1067,7 @@ func Getrlimit(which int, lim *Rlimit) (err error) { func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1078,7 +1078,7 @@ func Getsid(pid int) (sid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) sid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1088,7 +1088,7 @@ func Getsid(pid int) (sid int, err error) { func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1106,7 +1106,7 @@ func Getuid() (uid int) { func Kill(pid int, signum 
syscall.Signal) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procKill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1121,7 +1121,7 @@ func Lchown(path string, uid int, gid int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLchown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1141,7 +1141,7 @@ func Link(path string, link string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1151,7 +1151,7 @@ func Link(path string, link string) (err error) { func Listen(s int, backlog int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1166,7 +1166,7 @@ func Lstat(path string, stat *Stat_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLstat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1180,7 +1180,7 @@ func Madvise(b []byte, advice int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMadvise)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(advice), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1195,7 +1195,7 @@ func Mkdir(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdir)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1210,7 +1210,7 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1225,7 +1225,7 @@ func Mkfifo(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifo)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1240,7 +1240,7 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifoat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1255,7 +1255,7 @@ func Mknod(path string, mode uint32, dev int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknod)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1270,7 +1270,7 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1284,7 +1284,7 @@ func Mlock(b []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1294,7 +1294,7 @@ func Mlock(b []byte) (err error) { func Mlockall(flags int) (err error) { _, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1308,7 +1308,7 @@ func Mprotect(b []byte, prot int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMprotect)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(prot), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1322,7 +1322,7 @@ func Msync(b []byte, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMsync)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(flags), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1336,7 +1336,7 @@ func Munlock(b []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1346,7 +1346,7 @@ func Munlock(b []byte) (err error) { func Munlockall() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1356,7 +1356,7 @@ func Munlockall() (err error) { func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1372,7 +1372,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpen)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1388,7 +1388,7 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1404,7 +1404,7 @@ func Pathconf(path string, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPathconf)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0, 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1414,7 +1414,7 @@ func Pathconf(path string, name int) (val int, err error) { func Pause() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1429,7 +1429,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1444,7 +1444,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1459,7 +1459,7 @@ func read(fd int, p []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1479,7 +1479,7 @@ func Readlink(path string, buf []byte) (n int, err error) { r0, 
_, e1 := sysvicall6(uintptr(unsafe.Pointer(&procReadlink)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(len(buf)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1499,7 +1499,7 @@ func Rename(from string, to string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRename)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1519,7 +1519,7 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRenameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1534,7 +1534,7 @@ func Rmdir(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRmdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1545,7 +1545,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proclseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) newoffset = int64(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1556,7 +1556,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1566,7 +1566,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err func Setegid(egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1576,7 +1576,7 @@ func Setegid(egid int) (err error) { func Seteuid(euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSeteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1586,7 +1586,7 @@ func Seteuid(euid int) (err error) { func Setgid(gid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetgid)), 1, uintptr(gid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1600,7 +1600,7 @@ func Sethostname(p []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1610,7 +1610,7 @@ func Sethostname(p []byte) (err error) { func Setpgid(pid int, pgid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1620,7 +1620,7 @@ func Setpgid(pid int, pgid int) (err error) { func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSetpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1630,7 +1630,7 @@ func Setpriority(which int, who int, prio int) (err error) { func Setregid(rgid int, egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetregid)), 2, uintptr(rgid), 
uintptr(egid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1640,7 +1640,7 @@ func Setregid(rgid int, egid int) (err error) { func Setreuid(ruid int, euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1651,7 +1651,7 @@ func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1661,7 +1661,7 @@ func Setsid() (pid int, err error) { func Setuid(uid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetuid)), 1, uintptr(uid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1671,7 +1671,7 @@ func Setuid(uid int) (err error) { func Shutdown(s int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procshutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1686,7 +1686,7 @@ func Stat(path string, stat *Stat_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1701,7 +1701,7 @@ func Statvfs(path string, vfsstat *Statvfs_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStatvfs)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1721,7 +1721,7 @@ func Symlink(path string, link string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSymlink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1731,7 +1731,7 @@ func Symlink(path string, link string) (err error) { func Sync() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSync)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1742,7 +1742,7 @@ func Sysconf(which int) (n int64, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSysconf)), 1, uintptr(which), 0, 0, 0, 0, 0) n = int64(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1753,7 +1753,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0) ticks = uintptr(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1768,7 +1768,7 @@ func Truncate(path string, length int64) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procTruncate)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1778,7 +1778,7 @@ func Truncate(path string, length int64) (err error) { func Fsync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1788,7 +1788,7 @@ func Fsync(fd int) (err error) { func Ftruncate(fd int, length int64) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFtruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1806,7 +1806,7 @@ func Umask(mask int) (oldmask int) { func Uname(buf *Utsname) (err error) { _, _, e1 := 
rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1821,7 +1821,7 @@ func Unmount(target string, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procumount)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1836,7 +1836,7 @@ func Unlink(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlink)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1851,7 +1851,7 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1861,7 +1861,7 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { func Ustat(dev int, ubuf *Ustat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1876,7 +1876,7 @@ func Utime(path string, buf *Utimbuf) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtime)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1886,7 +1886,7 @@ func Utime(path string, buf *Utimbuf) (err error) { func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1896,7 +1896,7 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1907,7 +1907,7 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1917,7 +1917,7 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmunmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1928,7 +1928,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1942,7 +1942,7 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendto)), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1953,7 +1953,7 @@ func socket(domain int, typ int, proto int) 
(fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1963,7 +1963,7 @@ func socket(domain int, typ int, proto int) (fd int, err error) { func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1978,7 +1978,7 @@ func write(fd int, p []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1988,7 +1988,7 @@ func write(fd int, p []byte) (n int, err error) { func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1998,7 +1998,7 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2008,7 +2008,7 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2023,7 +2023,7 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvfrom)), 6, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2034,7 +2034,7 @@ func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2045,7 +2045,7 @@ func port_associate(port int, source int, object uintptr, events int, user *byte r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_associate)), 5, uintptr(port), uintptr(source), uintptr(object), uintptr(events), uintptr(unsafe.Pointer(user)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2056,7 +2056,7 @@ func port_dissociate(port int, source int, object uintptr) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_dissociate)), 3, uintptr(port), uintptr(source), uintptr(object), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2067,7 +2067,7 @@ func port_get(port int, pe *portEvent, timeout *Timespec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_get)), 3, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(unsafe.Pointer(timeout)), 0, 0, 0) 
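/*
port_create, port_associate, port_dissociate, port_get, and port_getn
above bind the Solaris/illumos event-ports facility declared in port.h,
the platform's closest analogue to Linux epoll or BSD kqueue: a port is
created once; file descriptors or other event sources are associated with
it; completed events are then retrieved singly (port_get) or in batches
(port_getn), optionally with a timeout.
*/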
n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2078,7 +2078,7 @@ func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Times r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_getn)), 5, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2088,7 +2088,7 @@ func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Times func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2098,7 +2098,7 @@ func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index c3168174..1d8fe1d4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -40,17 +40,6 @@ func read(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c9c4ad03..9862853d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -447,4 +447,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 12ff3417..8901f0f4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -369,4 +369,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c3fb5e77..6902c37e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -411,4 +411,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 358c847a..a6d3dff8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -314,4 +314,5 @@ const ( SYS_PROCESS_MRELEASE = 448 
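/*
Each of the zsysnum_linux_*.go hunks in this stretch registers the new
cachestat(2) syscall, introduced in Linux 6.5 (number 451 on most ABIs;
MIPS keeps its historical 4000/5000 offsets, hence 4451 and 5451). A
hedged sketch of invoking it directly on linux/amd64 follows; the struct
layouts are transcribed from the kernel uapi headers as of 6.5 and should
be verified against the target kernel before use:

	package main

	import (
		"fmt"
		"os"
		"syscall"
		"unsafe"
	)

	// cachestatRange selects the byte range to inspect; per the
	// cachestat(2) man page, Len == 0 means "from Off to end of file".
	type cachestatRange struct {
		Off, Len uint64
	}

	// cachestat reports page-cache residency for the range, in pages.
	type cachestat struct {
		Cache, Dirty, Writeback, Evicted, RecentlyEvicted uint64
	}

	func main() {
		f, err := os.Open("/etc/hosts")
		if err != nil {
			panic(err)
		}
		defer f.Close()

		var cr cachestatRange // zero value: whole file
		var cs cachestat
		_, _, errno := syscall.Syscall6(451, // SYS_CACHESTAT on amd64
			f.Fd(),
			uintptr(unsafe.Pointer(&cr)),
			uintptr(unsafe.Pointer(&cs)),
			0, 0, 0)
		if errno != 0 {
			panic(errno) // ENOSYS on kernels older than 6.5
		}
		fmt.Printf("cached=%d dirty=%d writeback=%d\n", cs.Cache, cs.Dirty, cs.Writeback)
	}
*/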
SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 81c4849b..b18f3f71 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -308,4 +308,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 202a57e9..0302e5e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -431,4 +431,5 @@ const ( SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 1fbceb52..6693ba4a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -361,4 +361,5 @@ const ( SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index b4ffb7a2..fd93f498 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -361,4 +361,5 @@ const ( SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 867985f9..760ddcad 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -431,4 +431,5 @@ const ( SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index a8cce69e..cff2b255 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -438,4 +438,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index d44c5b39..a4b2405d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -410,4 +410,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 4214dd9c..aca54b4e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -410,4 +410,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index ef285c56..9d1738d6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -315,4 +315,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index e6ed7d63..022878dc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -376,4 +376,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 92f628ef..4100a761 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -389,4 +389,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 494493c7..18aa70b4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1977,7 +1977,7 @@ const ( NFT_MSG_GETFLOWTABLE = 0x17 NFT_MSG_DELFLOWTABLE = 0x18 NFT_MSG_GETRULE_RESET = 0x19 - NFT_MSG_MAX = 0x21 + NFT_MSG_MAX = 0x22 NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 @@ -4499,7 +4499,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x145 + NL80211_ATTR_MAX = 0x146 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4869,7 +4869,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x99 + NL80211_CMD_MAX = 0x9a NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5503,7 +5503,7 @@ const ( NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1 NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5 NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 - NL80211_RATE_INFO_MAX = 0x16 + NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 83c69c11..1b4c97c3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -733,6 +733,10 @@ const ( RISCV_HWPROBE_KEY_IMA_EXT_0 = 0x4 RISCV_HWPROBE_IMA_FD = 0x1 RISCV_HWPROBE_IMA_C = 0x2 + RISCV_HWPROBE_IMA_V = 0x4 + RISCV_HWPROBE_EXT_ZBA = 0x8 + RISCV_HWPROBE_EXT_ZBB = 0x10 + RISCV_HWPROBE_EXT_ZBS = 0x20 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index a52e0331..9cabbb69 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -22,7 +22,7 @@ import ( // but only if there is space or tab inside s. func EscapeArg(s string) string { if len(s) == 0 { - return "\"\"" + return `""` } n := len(s) hasSpace := false @@ -35,7 +35,7 @@ func EscapeArg(s string) string { } } if hasSpace { - n += 2 + n += 2 // Reserve space for quotes. 
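/*
For reference, the escaping rules implemented here yield, for example
(illustrative expected outputs, not an exhaustive contract):

	EscapeArg(``)        -> `""`       // empty arg must still occupy a slot
	EscapeArg(`ab cd`)   -> `"ab cd"`  // space forces surrounding quotes
	EscapeArg(`a"b`)     -> `a\"b`     // interior quote escaped, no wrapping
	EscapeArg(`C:\dir\`) -> `C:\dir\`  // no space, tab, or quote: returned as-is
*/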
} if n == len(s) { return s @@ -82,20 +82,68 @@ func EscapeArg(s string) string { // in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument, // or any program that uses CommandLineToArgv. func ComposeCommandLine(args []string) string { - var commandLine string - for i := range args { - if i > 0 { - commandLine += " " + if len(args) == 0 { + return "" + } + + // Per https://learn.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-commandlinetoargvw: + // “This function accepts command lines that contain a program name; the + // program name can be enclosed in quotation marks or not.” + // + // Unfortunately, it provides no means of escaping interior quotation marks + // within that program name, and we have no way to report them here. + prog := args[0] + mustQuote := len(prog) == 0 + for i := 0; i < len(prog); i++ { + c := prog[i] + if c <= ' ' || (c == '"' && i == 0) { + // Force quotes for not only the ASCII space and tab as described in the + // MSDN article, but also ASCII control characters. + // The documentation for CommandLineToArgvW doesn't say what happens when + // the first argument is not a valid program name, but it empirically + // seems to drop unquoted control characters. + mustQuote = true + break + } + } + var commandLine []byte + if mustQuote { + commandLine = make([]byte, 0, len(prog)+2) + commandLine = append(commandLine, '"') + for i := 0; i < len(prog); i++ { + c := prog[i] + if c == '"' { + // This quote would interfere with our surrounding quotes. + // We have no way to report an error, so just strip out + // the offending character instead. + continue + } + commandLine = append(commandLine, c) } - commandLine += EscapeArg(args[i]) + commandLine = append(commandLine, '"') + } else { + if len(args) == 1 { + // args[0] is a valid command line representing itself. + // No need to allocate a new slice or string for it. + return prog + } + commandLine = []byte(prog) } - return commandLine + + for _, arg := range args[1:] { + commandLine = append(commandLine, ' ') + // TODO(bcmills): since we're already appending to a slice, it would be nice + // to avoid the intermediate allocations of EscapeArg. + // Perhaps we can factor out an appendEscapedArg function. + commandLine = append(commandLine, EscapeArg(arg)...) + } + return string(commandLine) } // DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, // as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that // command lines are passed around. -// DecomposeCommandLine returns error if commandLine contains NUL. +// DecomposeCommandLine returns an error if commandLine contains NUL. 
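/*
A hedged round-trip sketch of the two helpers (GOOS=windows; the printed
results are what the rules above imply, not outputs verified on every
Windows version):

	package main

	import (
		"fmt"

		"golang.org/x/sys/windows"
	)

	func main() {
		// The program name is quoted as a unit; an interior quote in it
		// would be silently dropped, since CommandLineToArgvW has no way
		// to escape quotes in the program-name position.
		cl := windows.ComposeCommandLine([]string{
			`C:\Program Files\app.exe`, `a b`, `c"d`,
		})
		fmt.Println(cl) // "C:\Program Files\app.exe" "a b" c\"d

		args, err := windows.DecomposeCommandLine(cl)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", args) // ["C:\\Program Files\\app.exe" "a b" "c\"d"]
	}
*/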
func DecomposeCommandLine(commandLine string) ([]string, error) { if len(commandLine) == 0 { return []string{}, nil @@ -105,18 +153,35 @@ func DecomposeCommandLine(commandLine string) ([]string, error) { return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") } var argc int32 - argv, err := CommandLineToArgv(&utf16CommandLine[0], &argc) + argv, err := commandLineToArgv(&utf16CommandLine[0], &argc) if err != nil { return nil, err } defer LocalFree(Handle(unsafe.Pointer(argv))) + var args []string - for _, v := range (*argv)[:argc] { - args = append(args, UTF16ToString((*v)[:])) + for _, p := range unsafe.Slice(argv, argc) { + args = append(args, UTF16PtrToString(p)) } return args, nil } +// CommandLineToArgv parses a Unicode command line string and sets +// argc to the number of parsed arguments. +// +// The returned memory should be freed using a single call to LocalFree. +// +// Note that although the return type of CommandLineToArgv indicates 8192 +// entries of up to 8192 characters each, the actual count of parsed arguments +// may exceed 8192, and the documentation for CommandLineToArgvW does not mention +// any bound on the lengths of the individual argument strings. +// (See https://go.dev/issue/63236.) +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + argp, err := commandLineToArgv(cmd, argc) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(argp)) + return argv, err +} + func CloseOnExec(fd Handle) { SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) } diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index d414ef13..26be94a8 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -7,8 +7,6 @@ package windows import ( "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) const ( @@ -1341,21 +1339,14 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() sdLen = min } - var src []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) - h.Data = unsafe.Pointer(selfRelativeSD) - h.Len = sdLen - h.Cap = sdLen - + src := unsafe.Slice((*byte)(unsafe.Pointer(selfRelativeSD)), sdLen) + // SECURITY_DESCRIPTOR has pointers in it, which means checkptr expects it to + // be aligned properly. When we're copying a Windows-allocated struct to a + // Go-allocated one, make sure that the Go allocation is aligned to the + // pointer size.
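/*
The rewrite above is part of retiring the internal unsafeheader package
(whose Slice type mirrors reflect.SliceHeader): since Go 1.17,
unsafe.Slice(ptr, n) builds the same aliasing slice that used to require
hand-filling a slice header, and it stays visible to the -d=checkptr
instrumentation. A generic before/after sketch, not specific to
SECURITY_DESCRIPTOR:

	// Before (Go < 1.17): poke the header fields directly.
	var b []byte
	h := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	h.Data = uintptr(ptr)
	h.Len = n
	h.Cap = n

	// After (Go 1.17+): one call, same aliasing semantics.
	b := unsafe.Slice((*byte)(ptr), n)

The aligned-copy trick just below allocates a []uintptr rather than a
[]byte so the Go-side buffer is pointer-aligned; checkptr requires that
alignment once the copy is reinterpreted as a SECURITY_DESCRIPTOR, which
contains pointer fields.
*/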
const psize = int(unsafe.Sizeof(uintptr(0))) - - var dst []byte - h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) alloc := make([]uintptr, (sdLen+psize-1)/psize) - h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data - h.Len = sdLen - h.Cap = sdLen - + dst := unsafe.Slice((*byte)(unsafe.Pointer(&alloc[0])), sdLen) copy(dst, src) return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 67bad092..35cfc57c 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -15,8 +15,6 @@ import ( "time" "unicode/utf16" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) type Handle uintptr @@ -240,7 +238,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW //sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW //sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW -//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW //sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] //sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) //sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) @@ -299,12 +297,15 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue //sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId //sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId +//sys ClosePseudoConsole(console Handle) = kernel32.ClosePseudoConsole +//sys createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) = kernel32.CreatePseudoConsole //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW //sys Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW @@ -1667,12 +1668,8 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) { // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. 
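/*
The //sys lines added above generate the raw bindings for the Windows
pseudo console (ConPTY) API; the exported CreatePseudoConsole and
ResizePseudoConsole wrappers that pack Coord into a uint32 appear later
in this diff. A minimal setup sketch, assuming GOOS=windows and this
revision of x/sys/windows (error handling abbreviated):

	// newConPTY creates an 80x25 pseudo console. The child process reads
	// its input from inR and writes its output to outW; the caller keeps
	// inW and outR to drive the terminal.
	func newConPTY() (pty, inW, outR windows.Handle, err error) {
		var inR, outW windows.Handle
		if err = windows.CreatePipe(&inR, &inW, nil, 0); err != nil {
			return
		}
		if err = windows.CreatePipe(&outR, &outW, nil, 0); err != nil {
			return
		}
		err = windows.CreatePseudoConsole(windows.Coord{X: 80, Y: 25}, inR, outW, 0, &pty)
		return
	}

	// On terminal resize:
	//   _ = windows.ResizePseudoConsole(pty, windows.Coord{X: 120, Y: 40})
	// And when done:
	//   windows.ClosePseudoConsole(pty)
*/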
func (s *NTUnicodeString) Slice() []uint16 { - var slice []uint16 - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice + slice := unsafe.Slice(s.Buffer, s.MaximumLength) + return slice[:s.Length] } func (s *NTUnicodeString) String() string { @@ -1695,12 +1692,8 @@ func NewNTString(s string) (*NTString, error) { // Slice returns a byte slice that aliases the data in the NTString. func (s *NTString) Slice() []byte { - var slice []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice + slice := unsafe.Slice(s.Buffer, s.MaximumLength) + return slice[:s.Length] } func (s *NTString) String() string { @@ -1752,10 +1745,7 @@ func LoadResourceData(module, resInfo Handle) (data []byte, err error) { if err != nil { return } - h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) - h.Data = unsafe.Pointer(ptr) - h.Len = int(size) - h.Cap = int(size) + data = unsafe.Slice((*byte)(unsafe.Pointer(ptr)), size) return } @@ -1826,3 +1816,17 @@ type PSAPI_WORKING_SET_EX_INFORMATION struct { // A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress. VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK } + +// CreatePseudoConsole creates a Windows pseudo console. +func CreatePseudoConsole(size Coord, in Handle, out Handle, flags uint32, pconsole *Handle) error { + // We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only + // accept arguments that can be cast to uintptr, and Coord can't. + return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), in, out, flags, pconsole) +} + +// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. +func ResizePseudoConsole(pconsole Handle, size Coord) error { + // We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only + // accept arguments that can be cast to uintptr, and Coord can't. + return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 88e62a63..b88dc7c8 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -247,6 +247,7 @@ const ( PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007 PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006 PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x00020016 ) const ( @@ -2139,6 +2140,12 @@ const ( ENABLE_LVB_GRID_WORLDWIDE = 0x10 ) +// Pseudo console related constants used for the flags parameter to +// CreatePseudoConsole.
See: https://learn.microsoft.com/en-us/windows/console/createpseudoconsole +const ( + PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 +) + type Coord struct { X int16 Y int16 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 5c385580..8b1688de 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -188,6 +188,7 @@ var ( procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") procCloseHandle = modkernel32.NewProc("CloseHandle") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") procCreateEventExW = modkernel32.NewProc("CreateEventExW") @@ -202,6 +203,7 @@ var ( procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procCreatePipe = modkernel32.NewProc("CreatePipe") procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") @@ -328,6 +330,7 @@ var ( procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") procResetEvent = modkernel32.NewProc("ResetEvent") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") @@ -1633,6 +1636,11 @@ func CloseHandle(handle Handle) (err error) { return } +func ClosePseudoConsole(console Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + return +} + func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { @@ -1762,6 +1770,14 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA return } +func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + if r0 != 0 { + hr = syscall.Errno(r0) + } + return +} + func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { @@ -2862,6 +2878,14 @@ func ResetEvent(event Handle) (err error) { return } +func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + if r0 != 0 { + hr = syscall.Errno(r0) + } + return +} + func ResumeThread(thread Handle) (ret uint32, err error) { r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) ret = uint32(r0) @@ -3820,9 +3844,9 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er return } -func CommandLineToArgv(cmd *uint16, argc 
*int32) (argv *[8192]*[8192]uint16, err error) { +func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) - argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + argv = (**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) } diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 9a475a24..4ef0af54 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -550,6 +550,56 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "move": { + "description": "Moves the specified address resource.", + "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", + "httpMethod": "POST", + "id": "compute.addresses.move", + "parameterOrder": [ + "project", + "region", + "address" + ], + "parameters": { + "address": { + "description": "Name of the address resource to move.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Source project ID which the Address is moved from.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "Name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/addresses/{address}/move", + "request": { + "$ref": "RegionAddressesMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setLabels": { "description": "Sets the labels on an Address. To learn more about labels, read the Labeling Resources documentation.", "flatPath": "projects/{project}/regions/{region}/addresses/{resource}/setLabels", @@ -712,7 +762,7 @@ ] }, "get": { - "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", + "description": "Returns the specified autoscaler resource.", "flatPath": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", "httpMethod": "GET", "id": "compute.autoscalers.get", @@ -1085,7 +1135,7 @@ ] }, "get": { - "description": "Returns the specified BackendBucket resource. 
Gets a list of available backend buckets by making a list() request.", + "description": "Returns the specified BackendBucket resource.", "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", "httpMethod": "GET", "id": "compute.backendBuckets.get", @@ -1523,7 +1573,7 @@ ] }, "get": { - "description": "Returns the specified BackendService resource. Gets a list of available backend services.", + "description": "Returns the specified BackendService resource.", "flatPath": "projects/{project}/global/backendServices/{backendService}", "httpMethod": "GET", "id": "compute.backendServices.get", @@ -1992,7 +2042,7 @@ ] }, "get": { - "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", + "description": "Returns the specified disk type.", "flatPath": "projects/{project}/zones/{zone}/diskTypes/{diskType}", "httpMethod": "GET", "id": "compute.diskTypes.get", @@ -2313,7 +2363,7 @@ ] }, "get": { - "description": "Returns a specified persistent disk. Gets a list of available persistent disks by making a list() request.", + "description": "Returns the specified persistent disk.", "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", "httpMethod": "GET", "id": "compute.disks.get", @@ -2754,6 +2804,67 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "update": { + "description": "Updates the specified disk with the data included in the request. The update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: user_license.", + "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", + "httpMethod": "PATCH", + "id": "compute.disks.update", + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "parameters": { + "disk": { + "description": "The disk name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "paths": { + "location": "query", + "repeated": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "updateMask": { + "description": "update_mask indicates fields to be updated as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/disks/{disk}", + "request": { + "$ref": "Disk" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -3325,7 +3436,7 @@ "type": "string" }, "parentId": { - "description": "Parent ID for this request.", + "description": "Parent ID for this request. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", "location": "query", "type": "string" }, @@ -3384,7 +3495,7 @@ "type": "string" }, "parentId": { - "description": "The new parent of the firewall policy.", + "description": "The new parent of the firewall policy. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", "location": "query", "type": "string" }, @@ -4313,7 +4424,7 @@ ] }, "get": { - "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request.", + "description": "Returns the specified address resource.", "flatPath": "projects/{project}/global/addresses/{address}", "httpMethod": "GET", "id": "compute.globalAddresses.get", @@ -4436,6 +4547,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "move": { + "description": "Moves the specified address resource from one project to another project.", + "flatPath": "projects/{project}/global/addresses/{address}/move", + "httpMethod": "POST", + "id": "compute.globalAddresses.move", + "parameterOrder": [ + "project", + "address" + ], + "parameters": { + "address": { + "description": "Name of the address resource to move.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Source project ID which the Address is moved from.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/global/addresses/{address}/move", + "request": { + "$ref": "GlobalAddressesMoveRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "setLabels": { "description": "Sets the labels on a GlobalAddress. To learn more about labels, read the Labeling Resources documentation.", "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", @@ -4886,7 +5039,7 @@ ] }, "get": { - "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + "description": "Returns the specified network endpoint group.", "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", "httpMethod": "GET", "id": "compute.globalNetworkEndpointGroups.get", @@ -5717,7 +5870,7 @@ ] }, "get": { - "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + "description": "Returns the specified HealthCheck resource.", "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", "httpMethod": "GET", "id": "compute.healthChecks.get", @@ -5968,7 +6121,7 @@ ] }, "get": { - "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", + "description": "Returns the specified HttpHealthCheck resource.", "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", "httpMethod": "GET", "id": "compute.httpHealthChecks.get", @@ -6219,7 +6372,7 @@ ] }, "get": { - "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", + "description": "Returns the specified HttpsHealthCheck resource.", "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", "httpMethod": "GET", "id": "compute.httpsHealthChecks.get", @@ -6559,7 +6712,7 @@ ] }, "get": { - "description": "Returns the specified image. Gets a list of available images by making a list() request.", + "description": "Returns the specified image.", "flatPath": "projects/{project}/global/images/{image}", "httpMethod": "GET", "id": "compute.images.get", @@ -6594,7 +6747,7 @@ ] }, "getFromFamily": { - "description": "Returns the latest image that is part of an image family and is not deprecated.", + "description": "Returns the latest image that is part of an image family and is not deprecated. For more information on image families, see Public image families documentation.", "flatPath": "projects/{project}/global/images/family/{family}", "httpMethod": "GET", "id": "compute.images.getFromFamily", @@ -6611,7 +6764,7 @@ "type": "string" }, "project": { - "description": "Project ID for this request.", + "description": "The image project that the image belongs to. For example, to get a CentOS image, specify centos-cloud as the image project.", "location": "path", "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, @@ -7260,7 +7413,7 @@ ] }, "get": { - "description": "Returns all of the details about the specified managed instance group. 
Gets a list of available managed instance groups by making a list() request.", + "description": "Returns all of the details about the specified managed instance group.", "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", "httpMethod": "GET", "id": "compute.instanceGroupManagers.get", @@ -8424,6 +8577,66 @@ }, "instanceTemplates": { "methods": { + "aggregatedList": { + "description": "Retrieves the list of all InstanceTemplates resources, regional and global, available to the specified project.", + "flatPath": "projects/{project}/aggregated/instanceTemplates", + "httpMethod": "GET", + "id": "compute.instanceTemplates.aggregatedList", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "includeAllScopes": { + "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. 
For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Name of the project scoping this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/aggregated/instanceTemplates", + "response": { + "$ref": "InstanceTemplateAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "delete": { "description": "Deletes the specified instance template. Deleting an instance template is permanent and cannot be undone. It is not possible to delete templates that are already in use by a managed instance group.", "flatPath": "projects/{project}/global/instanceTemplates/{instanceTemplate}", @@ -8464,7 +8677,7 @@ ] }, "get": { - "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request.", + "description": "Returns the specified instance template.", "flatPath": "projects/{project}/global/instanceTemplates/{instanceTemplate}", "httpMethod": "GET", "id": "compute.instanceTemplates.get", @@ -9134,7 +9347,7 @@ ] }, "get": { - "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request.", + "description": "Returns the specified Instance resource.", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}", "httpMethod": "GET", "id": "compute.instances.get", @@ -10251,11 +10464,11 @@ "https://www.googleapis.com/auth/compute" ] }, - "setScheduling": { - "description": "Sets an instance's scheduling options. 
You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. For more information about setting scheduling options for a VM, see Set VM host maintenance policy.", - "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", + "setName": { + "description": "Sets name of an instance.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setName", "httpMethod": "POST", - "id": "compute.instances.setScheduling", + "id": "compute.instances.setName", "parameterOrder": [ "project", "zone", @@ -10263,7 +10476,7 @@ ], "parameters": { "instance": { - "description": "Instance name for this request.", + "description": "The instance name for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -10289,9 +10502,9 @@ "type": "string" } }, - "path": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", + "path": "projects/{project}/zones/{zone}/instances/{instance}/setName", "request": { - "$ref": "Scheduling" + "$ref": "InstancesSetNameRequest" }, "response": { "$ref": "Operation" @@ -10301,11 +10514,11 @@ "https://www.googleapis.com/auth/compute" ] }, - "setServiceAccount": { - "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", - "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + "setScheduling": { + "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. For more information about setting scheduling options for a VM, see Set VM host maintenance policy.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", "httpMethod": "POST", - "id": "compute.instances.setServiceAccount", + "id": "compute.instances.setScheduling", "parameterOrder": [ "project", "zone", @@ -10313,7 +10526,7 @@ ], "parameters": { "instance": { - "description": "Name of the instance resource to start.", + "description": "Instance name for this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -10339,9 +10552,9 @@ "type": "string" } }, - "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + "path": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", "request": { - "$ref": "InstancesSetServiceAccountRequest" + "$ref": "Scheduling" }, "response": { "$ref": "Operation" @@ -10351,11 +10564,11 @@ "https://www.googleapis.com/auth/compute" ] }, - "setShieldedInstanceIntegrityPolicy": { - "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", - "httpMethod": "PATCH", - "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + "setServiceAccount": { + "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + "httpMethod": "POST", + "id": "compute.instances.setServiceAccount", "parameterOrder": [ "project", "zone", @@ -10363,7 +10576,7 @@ ], "parameters": { "instance": { - "description": "Name or id of the instance scoping this request.", + "description": "Name of the instance resource to start.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -10389,9 +10602,9 @@ "type": "string" } }, - "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", "request": { - "$ref": "ShieldedInstanceIntegrityPolicy" + "$ref": "InstancesSetServiceAccountRequest" }, "response": { "$ref": "Operation" @@ -10401,11 +10614,11 @@ "https://www.googleapis.com/auth/compute" ] }, - "setTags": { - "description": "Sets network tags for the specified instance to the data included in the request.", - "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", - "httpMethod": "POST", - "id": "compute.instances.setTags", + "setShieldedInstanceIntegrityPolicy": { + "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + "httpMethod": "PATCH", + "id": "compute.instances.setShieldedInstanceIntegrityPolicy", "parameterOrder": [ "project", "zone", @@ -10413,7 +10626,7 @@ ], "parameters": { "instance": { - "description": "Name of the instance scoping this request.", + "description": "Name or id of the instance scoping this request.", "location": "path", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, @@ -10439,9 +10652,9 @@ "type": "string" } }, - "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", "request": { - "$ref": "Tags" + "$ref": "ShieldedInstanceIntegrityPolicy" }, "response": { "$ref": "Operation" @@ -10451,11 +10664,11 @@ "https://www.googleapis.com/auth/compute" ] }, - "simulateMaintenanceEvent": { - "description": "Simulates a host maintenance event on a VM. For more information, see Simulate a host maintenance event.", - "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + "setTags": { + "description": "Sets network tags for the specified instance to the data included in the request.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", "httpMethod": "POST", - "id": "compute.instances.simulateMaintenanceEvent", + "id": "compute.instances.setTags", "parameterOrder": [ "project", "zone", @@ -10476,6 +10689,61 @@ "required": true, "type": "string" }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + "request": { + "$ref": "Tags" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "simulateMaintenanceEvent": { + "description": "Simulates a host maintenance event on a VM. For more information, see Simulate a host maintenance event.", + "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent", + "httpMethod": "POST", + "id": "compute.instances.simulateMaintenanceEvent", + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "parameters": { + "instance": { + "description": "Name of the instance scoping this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, "zone": { "description": "The name of the zone for this request.", "location": "path", @@ -11498,6 +11766,100 @@ } } }, + "interconnectRemoteLocations": { + "methods": { + "get": { + "description": "Returns the details for the specified interconnect remote location. 
Gets a list of available interconnect remote locations by making a list() request.", + "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", + "httpMethod": "GET", + "id": "compute.interconnectRemoteLocations.get", + "parameterOrder": [ + "project", + "interconnectRemoteLocation" + ], + "parameters": { + "interconnectRemoteLocation": { + "description": "Name of the interconnect remote location to return.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", + "response": { + "$ref": "InterconnectRemoteLocation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "list": { + "description": "Retrieves the list of interconnect remote locations available to the specified project.", + "flatPath": "projects/{project}/global/interconnectRemoteLocations", + "httpMethod": "GET", + "id": "compute.interconnectRemoteLocations.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/global/interconnectRemoteLocations", + "response": { + "$ref": "InterconnectRemoteLocationList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, "interconnects": { "methods": { "delete": { @@ -12184,7 +12546,7 @@ ] }, "get": { - "description": "Returns the specified machine image. Gets a list of available machine images by making a list() request.", + "description": "Returns the specified machine image.", "flatPath": "projects/{project}/global/machineImages/{machineImage}", "httpMethod": "GET", "id": "compute.machineImages.get", @@ -12493,7 +12855,7 @@ ] }, "get": { - "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", + "description": "Returns the specified machine type.", "flatPath": "projects/{project}/zones/{zone}/machineTypes/{machineType}", "httpMethod": "GET", "id": "compute.machineTypes.get", @@ -13465,7 +13827,7 @@ ] }, "get": { - "description": "Returns the specified network endpoint group. 
Gets a list of available network endpoint groups by making a list() request.", + "description": "Returns the specified network endpoint group.", "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", "httpMethod": "GET", "id": "compute.networkEndpointGroups.get", @@ -14500,7 +14862,7 @@ ] }, "get": { - "description": "Returns the specified network. Gets a list of available networks by making a list() request.", + "description": "Returns the specified network.", "flatPath": "projects/{project}/global/networks/{network}", "httpMethod": "GET", "id": "compute.networks.get", @@ -15541,6 +15903,56 @@ "https://www.googleapis.com/auth/compute" ] }, + "simulateMaintenanceEvent": { + "description": "Simulates maintenance event on specified nodes from the node group.", + "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", + "httpMethod": "POST", + "id": "compute.nodeGroups.simulateMaintenanceEvent", + "parameterOrder": [ + "project", + "zone", + "nodeGroup" + ], + "parameters": { + "nodeGroup": { + "description": "Name of the NodeGroup resource whose nodes will go under maintenance simulation.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "zone": { + "description": "The name of the zone for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", + "request": { + "$ref": "NodeGroupsSimulateMaintenanceEventRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "testIamPermissions": { "description": "Returns permissions that a caller has on the specified resource.", "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions", @@ -15699,7 +16111,7 @@ ] }, "get": { - "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", + "description": "Returns the specified node template.", "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", "httpMethod": "GET", "id": "compute.nodeTemplates.get", @@ -16051,7 +16463,7 @@ ] }, "get": { - "description": "Returns the specified node type. 
Gets a list of available node types by making a list() request.", + "description": "Returns the specified node type.", "flatPath": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", "httpMethod": "GET", "id": "compute.nodeTypes.get", @@ -18298,7 +18710,7 @@ ] }, "get": { - "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", + "description": "Returns the specified commitment resource.", "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", "httpMethod": "GET", "id": "compute.regionCommitments.get", @@ -18511,7 +18923,7 @@ "regionDiskTypes": { "methods": { "get": { - "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", + "description": "Returns the specified regional disk type.", "flatPath": "projects/{project}/regions/{region}/diskTypes/{diskType}", "httpMethod": "GET", "id": "compute.regionDiskTypes.get", @@ -19208,6 +19620,67 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "update": { + "description": "Update the specified disk with the data included in the request. Update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: user_license.", + "flatPath": "projects/{project}/regions/{region}/disks/{disk}", + "httpMethod": "PATCH", + "id": "compute.regionDisks.update", + "parameterOrder": [ + "project", + "region", + "disk" + ], + "parameters": { + "disk": { + "description": "The disk name for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + "required": true, + "type": "string" + }, + "paths": { + "location": "query", + "repeated": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + }, + "updateMask": { + "description": "update_mask indicates fields to be updated as part of this request.", + "format": "google-fieldmask", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/disks/{disk}", + "request": { + "$ref": "Disk" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -19507,7 +19980,7 @@ ] }, "get": { - "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + "description": "Returns the specified HealthCheck resource.", "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", "httpMethod": "GET", "id": "compute.regionHealthChecks.get", @@ -20955,68 +21428,23 @@ } } }, - "regionInstances": { - "methods": { - "bulkInsert": { - "description": "Creates multiple instances in a given region. Count specifies the number of instances to create.", - "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", - "httpMethod": "POST", - "id": "compute.regionInstances.bulkInsert", - "parameterOrder": [ - "project", - "region" - ], - "parameters": { - "project": { - "description": "Project ID for this request.", - "location": "path", - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - "required": true, - "type": "string" - }, - "region": { - "description": "The name of the region for this request.", - "location": "path", - "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - "required": true, - "type": "string" - }, - "requestId": { - "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - "location": "query", - "type": "string" - } - }, - "path": "projects/{project}/regions/{region}/instances/bulkInsert", - "request": { - "$ref": "BulkInsertInstanceResource" - }, - "response": { - "$ref": "Operation" - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute" - ] - } - } - }, - "regionNetworkEndpointGroups": { + "regionInstanceTemplates": { "methods": { "delete": { - "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "description": "Deletes the specified instance template. 
Deleting an instance template is permanent and cannot be undone.", + "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", "httpMethod": "DELETE", - "id": "compute.regionNetworkEndpointGroups.delete", + "id": "compute.regionInstanceTemplates.delete", "parameterOrder": [ "project", "region", - "networkEndpointGroup" + "instanceTemplate" ], "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + "instanceTemplate": { + "description": "The name of the instance template to delete.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -21028,8 +21456,9 @@ "type": "string" }, "region": { - "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -21039,7 +21468,7 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", "response": { "$ref": "Operation" }, @@ -21049,19 +21478,20 @@ ] }, "get": { - "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "description": "Returns the specified instance template.", + "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", "httpMethod": "GET", - "id": "compute.regionNetworkEndpointGroups.get", + "id": "compute.regionInstanceTemplates.get", "parameterOrder": [ "project", "region", - "networkEndpointGroup" + "instanceTemplate" ], "parameters": { - "networkEndpointGroup": { - "description": "The name of the network endpoint group. It should comply with RFC1035.", + "instanceTemplate": { + "description": "The name of the instance template.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", "required": true, "type": "string" }, @@ -21073,15 +21503,16 @@ "type": "string" }, "region": { - "description": "The name of the region where the network endpoint group is located. 
It should comply with RFC1035.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", "response": { - "$ref": "NetworkEndpointGroup" + "$ref": "InstanceTemplate" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", @@ -21090,10 +21521,10 @@ ] }, "insert": { - "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + "description": "Creates an instance template in the specified project and region using the global instance template whose URL is included in the request.", + "flatPath": "projects/{project}/regions/{region}/instanceTemplates", "httpMethod": "POST", - "id": "compute.regionNetworkEndpointGroups.insert", + "id": "compute.regionInstanceTemplates.insert", "parameterOrder": [ "project", "region" @@ -21107,8 +21538,9 @@ "type": "string" }, "region": { - "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", + "description": "The name of the region for this request.", "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" }, @@ -21118,9 +21550,9 @@ "type": "string" } }, - "path": "projects/{project}/regions/{region}/networkEndpointGroups", + "path": "projects/{project}/regions/{region}/instanceTemplates", "request": { - "$ref": "NetworkEndpointGroup" + "$ref": "InstanceTemplate" }, "response": { "$ref": "Operation" @@ -21131,10 +21563,250 @@ ] }, "list": { - "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", - "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + "description": "Retrieves a list of instance templates that are contained within the specified project and region.", + "flatPath": "projects/{project}/regions/{region}/instanceTemplates", "httpMethod": "GET", - "id": "compute.regionNetworkEndpointGroups.list", + "id": "compute.regionInstanceTemplates.list", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "filter": { + "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. 
For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + "location": "query", + "type": "string" + }, + "maxResults": { + "default": "500", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "orderBy": { + "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + "location": "query", + "type": "string" + }, + "pageToken": { + "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + "location": "query", + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the regions for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "returnPartialSuccess": { + "description": "Opt-in for partial success behavior which provides partial results in case of failure. 
The default value is false.", + "location": "query", + "type": "boolean" + } + }, + "path": "projects/{project}/regions/{region}/instanceTemplates", + "response": { + "$ref": "InstanceTemplateList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + } + } + }, + "regionInstances": { + "methods": { + "bulkInsert": { + "description": "Creates multiple instances in a given region. Count specifies the number of instances to create.", + "flatPath": "projects/{project}/regions/{region}/instances/bulkInsert", + "httpMethod": "POST", + "id": "compute.regionInstances.bulkInsert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region for this request.", + "location": "path", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/instances/bulkInsert", + "request": { + "$ref": "BulkInsertInstanceResource" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, + "regionNetworkEndpointGroups": { + "methods": { + "delete": { + "description": "Deletes the specified network endpoint group. Note that the NEG cannot be deleted if it is configured as a backend of a backend service.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "httpMethod": "DELETE", + "id": "compute.regionNetworkEndpointGroups.delete", + "parameterOrder": [ + "project", + "region", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group to delete. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "description": "Returns the specified network endpoint group.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "httpMethod": "GET", + "id": "compute.regionNetworkEndpointGroups.get", + "parameterOrder": [ + "project", + "region", + "networkEndpointGroup" + ], + "parameters": { + "networkEndpointGroup": { + "description": "The name of the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where the network endpoint group is located. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", + "response": { + "$ref": "NetworkEndpointGroup" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "description": "Creates a network endpoint group in the specified project using the parameters that are included in the request.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + "httpMethod": "POST", + "id": "compute.regionNetworkEndpointGroups.insert", + "parameterOrder": [ + "project", + "region" + ], + "parameters": { + "project": { + "description": "Project ID for this request.", + "location": "path", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "required": true, + "type": "string" + }, + "region": { + "description": "The name of the region where you want to create the network endpoint group. It should comply with RFC1035.", + "location": "path", + "required": true, + "type": "string" + }, + "requestId": { + "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. 
If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + "location": "query", + "type": "string" + } + }, + "path": "projects/{project}/regions/{region}/networkEndpointGroups", + "request": { + "$ref": "NetworkEndpointGroup" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "description": "Retrieves the list of regional network endpoint groups available to the specified project in the given region.", + "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups", + "httpMethod": "GET", + "id": "compute.regionNetworkEndpointGroups.list", "parameterOrder": [ "project", "region" @@ -23255,7 +23927,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + "description": "Returns the specified TargetHttpProxy resource in the specified region.", "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", "httpMethod": "GET", "id": "compute.regionTargetHttpProxies.get", @@ -23504,7 +24176,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + "description": "Returns the specified TargetHttpsProxy resource in the specified region.", "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", "httpMethod": "GET", "id": "compute.regionTargetHttpsProxies.get", @@ -24052,7 +24724,7 @@ ] }, "get": { - "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + "description": "Returns the specified UrlMap resource.", "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", "httpMethod": "GET", "id": "compute.regionUrlMaps.get", @@ -24349,7 +25021,7 @@ "regions": { "methods": { "get": { - "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", + "description": "Returns the specified Region resource. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. 
For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", "flatPath": "projects/{project}/regions/{region}", "httpMethod": "GET", "id": "compute.regions.get", @@ -25459,7 +26131,7 @@ ] }, "get": { - "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", + "description": "Returns the specified Router resource.", "flatPath": "projects/{project}/regions/{region}/routers/{router}", "httpMethod": "GET", "id": "compute.routers.get", @@ -25525,6 +26197,11 @@ "minimum": "0", "type": "integer" }, + "natName": { + "description": "Name of the nat service to filter the Nat Mapping information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + "location": "query", + "type": "string" + }, "orderBy": { "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", "location": "query", @@ -25910,7 +26587,7 @@ ] }, "get": { - "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", + "description": "Returns the specified Route resource.", "flatPath": "projects/{project}/global/routes/{route}", "httpMethod": "GET", "id": "compute.routes.get", @@ -27061,7 +27738,7 @@ ] }, "get": { - "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request.", + "description": "Returns the specified Snapshot resource.", "flatPath": "projects/{project}/global/snapshots/{snapshot}", "httpMethod": "GET", "id": "compute.snapshots.get", @@ -27441,7 +28118,7 @@ ] }, "get": { - "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request.", + "description": "Returns the specified SslCertificate resource.", "flatPath": "projects/{project}/global/sslCertificates/{sslCertificate}", "httpMethod": "GET", "id": "compute.sslCertificates.get", @@ -27701,7 +28378,7 @@ ] }, "insert": { - "description": "Returns the specified SSL policy resource. Gets a list of available SSL policies by making a list() request.", + "description": "Returns the specified SSL policy resource.", "flatPath": "projects/{project}/global/sslPolicies", "httpMethod": "POST", "id": "compute.sslPolicies.insert", @@ -28047,7 +28724,7 @@ ] }, "get": { - "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request.", + "description": "Returns the specified subnetwork.", "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", "httpMethod": "GET", "id": "compute.subnetworks.get", @@ -28808,7 +29485,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpProxy resource. 
Gets a list of available target HTTP proxies by making a list() request.", + "description": "Returns the specified TargetHttpProxy resource.", "flatPath": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", "httpMethod": "GET", "id": "compute.targetHttpProxies.get", @@ -29119,7 +29796,7 @@ ] }, "get": { - "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", + "description": "Returns the specified TargetHttpsProxy resource.", "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", "httpMethod": "GET", "id": "compute.targetHttpsProxies.get", @@ -29603,7 +30280,7 @@ ] }, "get": { - "description": "Returns the specified TargetInstance resource. Gets a list of available target instances by making a list() request.", + "description": "Returns the specified TargetInstance resource.", "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", "httpMethod": "GET", "id": "compute.targetInstances.get", @@ -29962,7 +30639,7 @@ ] }, "get": { - "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request.", + "description": "Returns the specified target pool.", "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}", "httpMethod": "GET", "id": "compute.targetPools.get", @@ -30355,7 +31032,7 @@ ] }, "get": { - "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request.", + "description": "Returns the specified TargetSslProxy resource.", "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}", "httpMethod": "GET", "id": "compute.targetSslProxies.get", @@ -30790,7 +31467,7 @@ ] }, "get": { - "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", + "description": "Returns the specified TargetTcpProxy resource.", "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", "httpMethod": "GET", "id": "compute.targetTcpProxies.get", @@ -31109,7 +31786,7 @@ ] }, "get": { - "description": "Returns the specified target VPN gateway. Gets a list of available target VPN gateways by making a list() request.", + "description": "Returns the specified target VPN gateway.", "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", "httpMethod": "GET", "id": "compute.targetVpnGateways.get", @@ -31410,7 +32087,7 @@ ] }, "get": { - "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + "description": "Returns the specified UrlMap resource.", "flatPath": "projects/{project}/global/urlMaps/{urlMap}", "httpMethod": "GET", "id": "compute.urlMaps.get", @@ -31808,7 +32485,7 @@ ] }, "get": { - "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request.", + "description": "Returns the specified VPN gateway.", "flatPath": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", "httpMethod": "GET", "id": "compute.vpnGateways.get", @@ -32206,7 +32883,7 @@ ] }, "get": { - "description": "Returns the specified VpnTunnel resource. 
Gets a list of available VPN tunnels by making a list() request.", + "description": "Returns the specified VpnTunnel resource.", "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", "httpMethod": "GET", "id": "compute.vpnTunnels.get", @@ -32600,7 +33277,7 @@ "zones": { "methods": { "get": { - "description": "Returns the specified Zone resource. Gets a list of available zones by making a list() request.", + "description": "Returns the specified Zone resource.", "flatPath": "projects/{project}/zones/{zone}", "httpMethod": "GET", "id": "compute.zones.get", @@ -32692,7 +33369,7 @@ } } }, - "revision": "20221126", + "revision": "20230516", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -33112,11 +33789,11 @@ "id": "AccessConfig", "properties": { "externalIpv6": { - "description": "The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.", + "description": "Applies to ipv6AccessConfigs only. The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.", "type": "string" }, "externalIpv6PrefixLength": { - "description": "The prefix length of the external IPv6 range.", + "description": "Applies to ipv6AccessConfigs only. The prefix length of the external IPv6 range.", "format": "int32", "type": "integer" }, @@ -33126,11 +33803,11 @@ "type": "string" }, "name": { - "description": "The name of this access configuration. The default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access.", + "description": "The name of this access configuration. In accessConfigs (IPv4), the default and recommended name is External NAT, but you can use any arbitrary string, such as My external IP or Network Access. In ipv6AccessConfigs, the recommend name is External IPv6.", "type": "string" }, "natIP": { - "description": "An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", + "description": "Applies to accessConfigs (IPv4) only. An external IP address associated with this instance. Specify an unused static external IP address available to the project or leave this field undefined to use an IP from a shared ephemeral IP address pool. If you specify a static external IP address, it must live in the same region as the zone of the instance.", "type": "string" }, "networkTier": { @@ -33158,8 +33835,7 @@ "type": "boolean" }, "type": { - "default": "ONE_TO_ONE_NAT", - "description": "The type of configuration. The default and only option is ONE_TO_ONE_NAT.", + "description": "The type of configuration. In accessConfigs (IPv4), the default and only option is ONE_TO_ONE_NAT. 
In ipv6AccessConfigs, the default and only option is DIRECT_IPV6.", "enum": [ "DIRECT_IPV6", "ONE_TO_ONE_NAT" @@ -33239,6 +33915,18 @@ "description": "[Output Only] Type of the resource. Always compute#address for addresses.", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this Address, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an Address.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "name": { "annotations": { "required": [ @@ -33723,6 +34411,28 @@ }, "type": "object" }, + "AllocationResourceStatus": { + "description": "[Output Only] Contains output only fields.", + "id": "AllocationResourceStatus", + "properties": { + "specificSkuAllocation": { + "$ref": "AllocationResourceStatusSpecificSKUAllocation", + "description": "Allocation Properties of this reservation." + } + }, + "type": "object" + }, + "AllocationResourceStatusSpecificSKUAllocation": { + "description": "Contains Properties set for the reservation.", + "id": "AllocationResourceStatusSpecificSKUAllocation", + "properties": { + "sourceInstanceTemplateId": { + "description": "ID of the instance template used to populate reservation properties.", + "type": "string" + } + }, + "type": "object" + }, "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk": { "id": "AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk", "properties": { @@ -33801,6 +34511,10 @@ "instanceProperties": { "$ref": "AllocationSpecificSKUAllocationReservedInstanceProperties", "description": "The instance properties for the reservation." + }, + "sourceInstanceTemplate": { + "description": "Specifies the instance template to create the reservation. If you use this field, you must exclude the instanceProperties field. This field is optional, and it can be a full or partial URL. For example, the following are all valid URLs to an instance template: - https://www.googleapis.com/compute/v1/projects/project /global/instanceTemplates/instanceTemplate - projects/project/global/instanceTemplates/instanceTemplate - global/instanceTemplates/instanceTemplate ", + "type": "string" } }, "type": "object" @@ -33900,6 +34614,18 @@ ], "type": "string" }, + "savedState": { + "description": "For LocalSSD disks on VM Instances in STOPPED or SUSPENDED state, this field is set to PRESERVED if the LocalSSD data has been saved to a persistent location by customer request. (see the discard_local_ssd option on Stop/Suspend). Read-only in the api.", + "enum": [ + "DISK_SAVED_STATE_UNSPECIFIED", + "PRESERVED" + ], + "enumDescriptions": [ + "*[Default]* Disk state has not been preserved.", + "Disk state has been preserved." 
+ ], + "type": "string" + }, "shieldedInstanceInitialState": { "$ref": "InitialStateConfig", "description": "[Output Only] shielded vm initial state stored on disk" @@ -33991,6 +34717,18 @@ "format": "int64", "type": "string" }, + "provisionedThroughput": { + "description": "Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be between 1 and 7,124.", + "format": "int64", + "type": "string" + }, + "replicaZones": { + "description": "Required for each regional disk associated with the instance. Specify the URLs of the zones where the disk should be replicated to. You must provide exactly two replica zones, and one zone must be the same as the instance zone. You can't use this option with boot disks.", + "items": { + "type": "string" + }, + "type": "array" + }, "resourceManagerTags": { "additionalProperties": { "type": "string" @@ -34610,7 +35348,7 @@ "id": "AutoscalingPolicy", "properties": { "coolDownPeriodSec": { - "description": "The number of seconds that the autoscaler waits before it starts collecting information from a new instance. This prevents the autoscaler from collecting information when the instance is initializing, during which the collected usage would not be reliable. The default time autoscaler waits is 60 seconds. Virtual machine initialization times might vary because of numerous factors. We recommend that you test how long an instance may take to initialize. To do this, create an instance and time the startup process.", + "description": "The number of seconds that your application takes to initialize on a VM instance. This is referred to as the [initialization period](/compute/docs/autoscaler#cool_down_period). Specifying an accurate initialization period improves autoscaler decisions. For example, when scaling out, the autoscaler ignores data from VMs that are still initializing because those VMs might not yet represent normal usage of your application. The default initialization period is 60 seconds. Initialization periods might vary because of numerous factors. We recommend that you test how long your application takes to initialize. To do this, create a VM and time your application's startup process.", "format": "int32", "type": "integer" }, @@ -34640,7 +35378,7 @@ "type": "integer" }, "mode": { - "description": "Defines operating mode for this policy.", + "description": "Defines the operating mode for this policy. The following modes are available: - OFF: Disables the autoscaler but maintains its configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM instances only. - ON: Enables all autoscaler activities according to its policy. For more information, see \"Turning off or restricting an autoscaler\"", "enum": [ "OFF", "ON", @@ -35311,7 +36049,7 @@ "type": "string" }, "localityLbPolicies": { - "description": "A list of locality load balancing policies to be used in order of preference. Either the policy or the customPolicy field should be set. Overrides any value set in the localityLbPolicy field. localityLbPolicies is only supported when the BackendService is referenced by a URL Map that is referenced by a target gRPC proxy that has the validateForProxyless field set to true.", + "description": "A list of locality load-balancing policies to be used in order of preference. When you use localityLbPolicies, you must set at least one value for either the localityLbPolicies[].policy or the localityLbPolicies[].customPolicy field. 
localityLbPolicies overrides any value set in the localityLbPolicy field. For an example of how to use this field, see Define a list of preferred policies. Caution: This field and its children are intended for use in a service mesh that includes gRPC clients only. Envoy proxies can't use backend services that have this configuration.", "items": { "$ref": "BackendServiceLocalityLoadBalancingPolicyConfig" }, @@ -35326,7 +36064,8 @@ "ORIGINAL_DESTINATION", "RANDOM", "RING_HASH", - "ROUND_ROBIN" + "ROUND_ROBIN", + "WEIGHTED_MAGLEV" ], "enumDescriptions": [ "", @@ -35335,7 +36074,8 @@ "Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer.", "The load balancer selects a random healthy host.", "The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests.", - "This is a simple policy in which each healthy backend is selected in round robin order. This is the default." + "This is a simple policy in which each healthy backend is selected in round robin order. This is the default.", + "Per-instance weighted Load Balancing via health check reported weights. If set, the Backend Service must configure a non legacy HTTP-based Health Check, and health check replies are expected to contain non-standard HTTP response header field X-Load-Balancing-Endpoint-Weight to specify the per-instance weights. If set, Load Balancing is weighted based on the per-instance weights reported in the last processed health check replies, as long as every instance either reported a valid weight or had UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. This option is only supported in Network Load Balancing." ], "type": "string" }, @@ -35347,6 +36087,13 @@ "$ref": "Duration", "description": "Specifies the default maximum duration (timeout) for streams to this service. Duration is computed from the beginning of the stream until the response has been completely processed, including all retries. A stream that does not complete in this duration is closed. If not specified, there will be no timeout limit, i.e. the maximum duration is infinite. This value can be overridden in the PathMatcher configuration of the UrlMap that references this backend service. This field is only allowed when the loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED." }, + "metadatas": { + "additionalProperties": { + "type": "string" + }, + "description": "Deployment metadata associated with the resource to be set by a GKE hub controller and read by the backend RCTH", + "type": "object" + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -35942,7 +36689,7 @@ "type": "string" }, "name": { - "description": "Identifies the custom policy. The value should match the type the custom implementation is registered with on the gRPC clients. 
It should follow protocol buffer message naming conventions and include the full path (e.g. myorg.CustomLbPolicy). The maximum length is 256 characters. Note that specifying the same custom policy more than once for a backend is not a valid configuration and will be rejected.", + "description": "Identifies the custom policy. The value should match the name of a custom implementation registered on the gRPC clients. It should follow protocol buffer message naming conventions and include the full path (for example, myorg.CustomLbPolicy). The maximum length is 256 characters. Do not specify the same custom policy more than once for a backend. If you do, the configuration is rejected. For an example of how to use this field, see Use a custom policy.", "type": "string" } }, @@ -35953,7 +36700,7 @@ "id": "BackendServiceLocalityLoadBalancingPolicyConfigPolicy", "properties": { "name": { - "description": "The name of a locality load balancer policy to be used. The value should be one of the predefined ones as supported by localityLbPolicy, although at the moment only ROUND_ROBIN is supported. This field should only be populated when the customPolicy field is not used. Note that specifying the same policy more than once for a backend is not a valid configuration and will be rejected.", + "description": "The name of a locality load-balancing policy. Valid values include ROUND_ROBIN and, for Java clients, LEAST_REQUEST. For information about these values, see the description of localityLbPolicy. Do not specify the same policy more than once for a backend. If you do, the configuration is rejected.", "enum": [ "INVALID_LB_POLICY", "LEAST_REQUEST", @@ -35961,7 +36708,8 @@ "ORIGINAL_DESTINATION", "RANDOM", "RING_HASH", - "ROUND_ROBIN" + "ROUND_ROBIN", + "WEIGHTED_MAGLEV" ], "enumDescriptions": [ "", @@ -35970,7 +36718,8 @@ "Backend host is selected based on the client connection metadata, i.e., connections are opened to the same address as the destination address of the incoming connection before the connection was redirected to the load balancer.", "The load balancer selects a random healthy host.", "The ring/modulo hash load balancer implements consistent hashing to backends. The algorithm has the property that the addition/removal of a host from a set of N hosts only affects 1/N of the requests.", - "This is a simple policy in which each healthy backend is selected in round robin order. This is the default." + "This is a simple policy in which each healthy backend is selected in round robin order. This is the default.", + "Per-instance weighted Load Balancing via health check reported weights. If set, the Backend Service must configure a non legacy HTTP-based Health Check, and health check replies are expected to contain non-standard HTTP response header field X-Load-Balancing-Endpoint-Weight to specify the per-instance weights. If set, Load Balancing is weighted based on the per-instance weights reported in the last processed health check replies, as long as every instance either reported a valid weight or had UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. This option is only supported in Network Load Balancing." ], "type": "string" } @@ -35985,6 +36734,27 @@ "description": "Denotes whether to enable logging for the load balancer traffic served by this backend service. 
The default value is false.", "type": "boolean" }, + "optionalFields": { + "description": "This field can only be specified if logging is enabled for this backend service and \"logConfig.optionalMode\" was set to CUSTOM. Contains a list of optional fields you want to include in the logs. For example: serverInstance, serverGkeDetails.cluster, serverGkeDetails.pod.podNamespace", + "items": { + "type": "string" + }, + "type": "array" + }, + "optionalMode": { + "description": "This field can only be specified if logging is enabled for this backend service. Configures whether all, none or a subset of optional fields should be added to the reported logs. One of [INCLUDE_ALL_OPTIONAL, EXCLUDE_ALL_OPTIONAL, CUSTOM]. Default is EXCLUDE_ALL_OPTIONAL.", + "enum": [ + "CUSTOM", + "EXCLUDE_ALL_OPTIONAL", + "INCLUDE_ALL_OPTIONAL" + ], + "enumDescriptions": [ + "A subset of optional fields.", + "None optional fields.", + "All optional fields." + ], + "type": "string" + }, "sampleRate": { "description": "This field can only be specified if logging is enabled for this backend service. The value of the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. The default value is 1.0.", "format": "float", @@ -36360,7 +37130,7 @@ "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the principals in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." }, "members": { - "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. 
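The optionalMode and optionalFields knobs above only work as a pair: the optionalFields list is honored solely when optionalMode is CUSTOM. A short sketch under the same Go client assumptions as above (all resource names are placeholders):

```go
package sketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// enableCustomLogging turns on load balancer logging with a 50% sample rate
// and a custom subset of optional fields in each log entry.
func enableCustomLogging(ctx context.Context, svc *compute.Service, project, backend string) (*compute.Operation, error) {
	patch := &compute.BackendService{
		LogConfig: &compute.BackendServiceLogConfig{
			Enable:     true,
			SampleRate: 0.5, // in [0, 1]; 1.0 reports every logged request
			// OptionalFields is only consulted because OptionalMode is CUSTOM;
			// with the default EXCLUDE_ALL_OPTIONAL it would be ignored.
			OptionalMode:   "CUSTOM",
			OptionalFields: []string{"serverInstance", "serverGkeDetails.cluster"},
		},
	}
	return svc.BackendServices.Patch(project, backend, patch).Context(ctx).Do()
}
```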
For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. ", + "description": "Specifies the principals requesting access for a Google Cloud resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. Does not include identities that come from external identity providers (IdPs) through identity federation. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@example.com` . * `serviceAccount:{emailid}`: An email address that represents a Google service account. For example, `my-other-app@appspot.gserviceaccount.com`. * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An identifier for a [Kubernetes service account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `alice@example.com?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `admins@example.com?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding.", "items": { "type": "string" }, @@ -36613,7 +37383,7 @@ "type": "string" }, "splitSourceCommitment": { - "description": "Source commitment to be splitted into a new commitment.", + "description": "Source commitment to be split into a new commitment.", "type": "string" }, "startTimestamp": { @@ -36631,7 +37401,7 @@ ], "enumDescriptions": [ "", - "", + "Deprecate CANCELED status. 
Will use separate status to differentiate cancel by mergeCud or manual cancellation.", "", "", "" @@ -36648,11 +37418,13 @@ "ACCELERATOR_OPTIMIZED", "COMPUTE_OPTIMIZED", "COMPUTE_OPTIMIZED_C2D", + "COMPUTE_OPTIMIZED_C3", "GENERAL_PURPOSE", "GENERAL_PURPOSE_E2", "GENERAL_PURPOSE_N2", "GENERAL_PURPOSE_N2D", "GENERAL_PURPOSE_T2D", + "GRAPHICS_OPTIMIZED", "MEMORY_OPTIMIZED", "MEMORY_OPTIMIZED_M3", "TYPE_UNSPECIFIED" @@ -36668,6 +37440,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -37318,6 +38092,17 @@ ], "type": "string" }, + "asyncPrimaryDisk": { + "$ref": "DiskAsyncReplication", + "description": "Disk asynchronously replicated into this disk." + }, + "asyncSecondaryDisks": { + "additionalProperties": { + "$ref": "DiskAsyncReplicationList" + }, + "description": "[Output Only] A list of disks this disk is asynchronously replicated to.", + "type": "object" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -37414,6 +38199,11 @@ "format": "int64", "type": "string" }, + "provisionedThroughput": { + "description": "Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle. Values must be between 1 and 7,124.", + "format": "int64", + "type": "string" + }, "region": { "description": "[Output Only] URL of the region where the disk resides. Only applicable for regional resources. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" @@ -37432,6 +38222,10 @@ }, "type": "array" }, + "resourceStatus": { + "$ref": "DiskResourceStatus", + "description": "[Output Only] Status information for the disk resource." + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -37445,6 +38239,14 @@ "format": "int64", "type": "string" }, + "sourceConsistencyGroupPolicy": { + "description": "[Output Only] URL of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", + "type": "string" + }, + "sourceConsistencyGroupPolicyId": { + "description": "[Output Only] ID of the DiskConsistencyGroupPolicy for a secondary disk that was created using a consistency group.", + "type": "string" + }, "sourceDisk": { "description": "The source disk used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - https://www.googleapis.com/compute/v1/projects/project/regions/region /disks/disk - projects/project/zones/zone/disks/disk - projects/project/regions/region/disks/disk - zones/zone/disks/disk - regions/region/disks/disk ", "type": "string" @@ -37644,6 +38446,37 @@ }, "type": "object" }, + "DiskAsyncReplication": { + "id": "DiskAsyncReplication", + "properties": { + "consistencyGroupPolicy": { + "description": "[Output Only] URL of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", + "type": "string" + }, + "consistencyGroupPolicyId": { + "description": "[Output Only] ID of the DiskConsistencyGroupPolicy if replication was started on the disk as a member of a group.", + "type": "string" + }, + "disk": { + "description": "The other disk asynchronously replicated to or from the current disk. You can provide this as a partial or full URL to the resource. 
For example, the following are valid values: - https://www.googleapis.com/compute/v1/projects/project/zones/zone /disks/disk - projects/project/zones/zone/disks/disk - zones/zone/disks/disk ", + "type": "string" + }, + "diskId": { + "description": "[Output Only] The unique ID of the other disk asynchronously replicated to or from the current disk. This value identifies the exact disk that was used to create this replication. For example, if you started replicating the persistent disk from a disk that was later deleted and recreated under the same name, the disk ID would identify the exact version of the disk that was used.", + "type": "string" + } + }, + "type": "object" + }, + "DiskAsyncReplicationList": { + "id": "DiskAsyncReplicationList", + "properties": { + "asyncReplicationDisk": { + "$ref": "DiskAsyncReplication" + } + }, + "type": "object" + }, "DiskInstantiationConfig": { "description": "A specification of the desired way to instantiate a disk in the instance template when its created from a source instance.", "id": "DiskInstantiationConfig", @@ -37833,6 +38666,47 @@ }, "type": "object" }, + "DiskResourceStatus": { + "id": "DiskResourceStatus", + "properties": { + "asyncPrimaryDisk": { + "$ref": "DiskResourceStatusAsyncReplicationStatus" + }, + "asyncSecondaryDisks": { + "additionalProperties": { + "$ref": "DiskResourceStatusAsyncReplicationStatus" + }, + "description": "Key: disk, value: AsyncReplicationStatus message", + "type": "object" + } + }, + "type": "object" + }, + "DiskResourceStatusAsyncReplicationStatus": { + "id": "DiskResourceStatusAsyncReplicationStatus", + "properties": { + "state": { + "enum": [ + "ACTIVE", + "CREATED", + "STARTING", + "STATE_UNSPECIFIED", + "STOPPED", + "STOPPING" + ], + "enumDescriptions": [ + "Replication is active.", + "Secondary disk is created and is waiting for replication to start.", + "Replication is starting.", + "", + "Replication is stopped.", + "Replication is stopping." + ], + "type": "string" + } + }, + "type": "object" + }, "DiskType": { "description": "Represents a Disk Type resource. Google Compute Engine has two Disk Type resources: * [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) * [Zonal](/compute/docs/reference/rest/v1/diskTypes) You can choose from a variety of disk types based on your needs. For more information, read Storage options. The diskTypes resource represents disk types for a zonal persistent disk. For more information, read Zonal persistent disks. The regionDiskTypes resource represents disk types for a regional persistent disk. For more information, read Regional persistent disks.", "id": "DiskType", @@ -39420,7 +40294,7 @@ "id": "FirewallPolicyRule", "properties": { "action": { - "description": "The Action to perform when the client connection triggers the rule. Can currently be either \"allow\" or \"deny()\" where valid values for status are 403, 404, and 502.", + "description": "The Action to perform when the client connection triggers the rule. Valid actions are \"allow\", \"deny\" and \"goto_next\".", "type": "string" }, "description": { @@ -39498,6 +40372,20 @@ "description": "Represents a match condition that incoming traffic is evaluated against. Exactly one field must be specified.", "id": "FirewallPolicyRuleMatcher", "properties": { + "destAddressGroups": { + "description": "Address groups which should be matched against the traffic destination. 
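The new disk fields above split into settable inputs (asyncPrimaryDisk, provisionedThroughput) and [Output Only] state (asyncSecondaryDisks, resourceStatus, the consistency-group URLs). A sketch of creating the secondary side of a replication pair, assuming a Go client generated from this discovery revision; the disk names and zones are placeholders, and provisionedThroughput applies only to disk types that support provisioned throughput:

```go
package sketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// createAsyncSecondary creates a disk that asynchronously replicates from an
// existing primary disk; once it exists, replication state can be read back
// from resourceStatus.asyncPrimaryDisk.
func createAsyncSecondary(ctx context.Context, svc *compute.Service, project string) (*compute.Operation, error) {
	secondary := &compute.Disk{
		Name: "data-disk-secondary",
		AsyncPrimaryDisk: &compute.DiskAsyncReplication{
			// Partial URL form, as accepted by the disk field above.
			Disk: "projects/" + project + "/zones/us-central1-a/disks/data-disk-primary",
		},
		// Throughput in MB per second; the schema allows 1 to 7,124.
		ProvisionedThroughput: 140,
	}
	return svc.Disks.Insert(project, "us-east1-b", secondary).Context(ctx).Do()
}
```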
Maximum number of destination address groups is 10.", + "items": { + "type": "string" + }, + "type": "array" + }, + "destFqdns": { + "description": "Fully Qualified Domain Names (FQDNs) which should be matched against the traffic destination. Maximum number of destination FQDNs allowed is 100.", + "items": { + "type": "string" + }, + "type": "array" + }, "destIpRanges": { "description": "CIDR IP address range. Maximum number of destination CIDR IP ranges allowed is 5000.", "items": { @@ -39505,6 +40393,20 @@ }, "type": "array" }, + "destRegionCodes": { + "description": "Region codes whose IP addresses will be used to match for destination of traffic. Should be specified as a 2-letter country code as defined by ISO 3166 alpha-2, for example \"US\". Maximum number of destination region codes allowed is 5000.", + "items": { + "type": "string" + }, + "type": "array" + }, + "destThreatIntelligences": { + "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic destination.", + "items": { + "type": "string" + }, + "type": "array" + }, "layer4Configs": { "description": "Pairs of IP protocols and ports that the rule should match.", "items": { @@ -39512,6 +40414,20 @@ }, "type": "array" }, + "srcAddressGroups": { + "description": "Address groups which should be matched against the traffic source. Maximum number of source address groups is 10.", + "items": { + "type": "string" + }, + "type": "array" + }, + "srcFqdns": { + "description": "Fully Qualified Domain Names (FQDNs) which should be matched against the traffic source. Maximum number of source FQDNs allowed is 100.", + "items": { + "type": "string" + }, + "type": "array" + }, "srcIpRanges": { "description": "CIDR IP address range. Maximum number of source CIDR IP ranges allowed is 5000.", "items": { @@ -39519,12 +40435,26 @@ }, "type": "array" }, + "srcRegionCodes": { + "description": "Region codes whose IP addresses will be used to match for source of traffic. Should be specified as a 2-letter country code as defined by ISO 3166 alpha-2, for example \"US\". Maximum number of source region codes allowed is 5000.", + "items": { + "type": "string" + }, + "type": "array" + }, "srcSecureTags": { "description": "List of secure tag values, which should be matched at the source of the traffic. For INGRESS rule, if all the srcSecureTag are INEFFECTIVE, and there is no srcIpRange, this rule will be ignored. Maximum number of source tag values allowed is 256.", "items": { "$ref": "FirewallPolicyRuleSecureTag" }, "type": "array" + }, + "srcThreatIntelligences": { + "description": "Names of Network Threat Intelligence lists. The IPs in these lists will be matched against traffic source.", + "items": { + "type": "string" + }, + "type": "array" + } }, "type": "object" }, @@ -39629,10 +40559,18 @@ "description": "This field is used along with the backend_service field for internal load balancing or with the target field for internal TargetInstance. If the field is set to TRUE, clients can access ILB from all regions. Otherwise only allows access from clients in the same region as the internal load balancer.", "type": "boolean" }, + "allowPscGlobalAccess": { + "description": "This is used in PSC consumer ForwardingRule to control whether the PSC endpoint can be accessed from another region.", + "type": "boolean" + }, "backendService": { "description": "Identifies the backend service to which the forwarding rule sends traffic. 
Required for Internal TCP/UDP Load Balancing and Network Load Balancing; must be omitted for all other load balancer types.", "type": "string" }, + "baseForwardingRule": { + "description": "[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.", + "type": "string" + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -39719,7 +40657,7 @@ "type": "string" }, "network": { - "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If this field is not specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", + "description": "This field is not used for external load balancing. For Internal TCP/UDP Load Balancing, this field identifies the network that the load balanced IP should belong to for this Forwarding Rule. If the subnetwork is specified, the network of the subnetwork will be used. If neither subnetwork nor this field is specified, the default network will be used. For Private Service Connect forwarding rules that forward traffic to Google APIs, a network must be provided.", "type": "string" }, "networkTier": { @@ -39801,12 +40739,19 @@ "description": "[Output Only] The internal fully qualified service name for this Forwarding Rule. This field is only used for internal load balancing.", "type": "string" }, + "sourceIpRanges": { + "description": "If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. Each source_ip_range entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24).", + "items": { + "type": "string" + }, + "type": "array" + }, "subnetwork": { "description": "This field identifies the subnetwork that the load balanced IP should belong to for this Forwarding Rule, used in internal load balancing and network load balancing with IPv6. If the network specified is in auto subnet mode, this field is optional. However, a subnetwork must be specified if the network is in custom subnet mode or when creating external forwarding rule with IPv6.", "type": "string" }, "target": { - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. For more information, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. 
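The sourceIpRanges field described above effectively turns a forwarding rule into a source-filtered one: at most 64 entries, regional EXTERNAL scheme only, each entry a literal IP or a CIDR range. A sketch under the same Go client assumptions (all names and addresses are placeholders):

```go
package sketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// insertFilteredRule creates a regional external forwarding rule that only
// forwards traffic whose source matches the given IPs/CIDR ranges. The
// [Output Only] baseForwardingRule field would then point at the unfiltered
// rule with the same IP, protocol, and ports, if one exists.
func insertFilteredRule(ctx context.Context, svc *compute.Service, project, region string) (*compute.Operation, error) {
	rule := &compute.ForwardingRule{
		Name:                "allowlist-rule",
		LoadBalancingScheme: "EXTERNAL", // sourceIpRanges requires a regional EXTERNAL rule
		IPProtocol:          "TCP",
		PortRange:           "80-80",
		SourceIpRanges:      []string{"203.0.113.7", "198.51.100.0/24"},
		// Partial URL to a placeholder backend service in the same region.
		BackendService: "regions/" + region + "/backendServices/my-backend-service",
	}
	return svc.ForwardingRules.Insert(project, region, rule).Context(ctx).Do()
}
```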
", + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must be in the same region as the forwarding rule. For global forwarding rules, this target must be a global load balancing resource. The forwarded traffic must be of a type appropriate to the target object. - For load balancers, see the \"Target\" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: - vpc-sc - APIs that support VPC Service Controls. - all-apis - All supported Google APIs. - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. ", "type": "string" } }, @@ -40222,6 +41167,20 @@ }, "type": "object" }, + "GlobalAddressesMoveRequest": { + "id": "GlobalAddressesMoveRequest", + "properties": { + "description": { + "description": "An optional destination address description if intended to be different from the source.", + "type": "string" + }, + "destinationAddress": { + "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project /global/addresses/address - projects/project/global/addresses/address Note that destination project must be different from the source project. So /global/addresses/address is not valid partial url.", + "type": "string" + } + }, + "type": "object" + }, "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", "properties": { @@ -40379,13 +41338,15 @@ "id": "GuestOsFeature", "properties": { "type": { - "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.", + "description": "The ID of a supported feature. To add multiple values, use commas to separate values. Set to one or more of the following values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see Enabling guest operating system features.", "enum": [ "FEATURE_TYPE_UNSPECIFIED", "GVNIC", "MULTI_IP_SUBNET", "SECURE_BOOT", "SEV_CAPABLE", + "SEV_LIVE_MIGRATABLE", + "SEV_SNP_CAPABLE", "UEFI_COMPATIBLE", "VIRTIO_SCSI_MULTIQUEUE", "WINDOWS" @@ -40398,6 +41359,8 @@ "", "", "", + "", + "", "" ], "type": "string" @@ -40565,7 +41528,7 @@ "type": "object" }, "HealthCheck": { - "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/v1/healthChecks) * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). 
External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.HealthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", + "description": "Represents a Health Check resource. Google Compute Engine has two Health Check resources: * [Global](/compute/docs/reference/rest/v1/healthChecks) * [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Traffic Director must use global health checks (`compute.v1.healthChecks`). Internal TCP/UDP load balancers can use either regional or global health checks (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). External HTTP(S), TCP proxy, and SSL proxy load balancers as well as managed instance group auto-healing must use global health checks (`compute.v1.healthChecks`). Backend service-based network load balancers must use regional health checks (`compute.v1.regionHealthChecks`). Target pool-based network load balancers must use legacy HTTP health checks (`compute.v1.httpHealthChecks`). For more information, see Health checks overview.", "id": "HealthCheck", "properties": { "checkIntervalSec": { @@ -41260,7 +42223,7 @@ "type": "string" }, "healthState": { - "description": "Health state of the instance.", + "description": "Health state of the IPv4 address of the instance.", "enum": [ "HEALTHY", "UNHEALTHY" @@ -41333,10 +42296,10 @@ "UNKNOWN" ], "enumDescriptions": [ - "", - "", - "", - "" + "Endpoint is being drained.", + "Endpoint is healthy.", + "Endpoint is unhealthy.", + "Health status of the endpoint is unknown." ], "type": "string" } @@ -41582,7 +42545,7 @@ "type": "integer" }, "requestPath": { - "description": "The request path of the HTTP health check request. The default value is /. This field does not support query parameters.", + "description": "The request path of the HTTP health check request. The default value is /. This field does not support query parameters. Must comply with RFC3986.", "type": "string" }, "selfLink": { @@ -41919,6 +42882,10 @@ }, "type": "array" }, + "pathTemplateMatch": { + "description": "If specified, the route is a pattern match expression that must match the :path header once the query string is removed. A pattern match allows you to match - The value must be between 1 and 1024 characters - The pattern must start with a leading slash (\"/\") - There may be no more than 5 operators in pattern Precisely one of prefix_match, full_path_match, regex_match or path_template_match must be set.", + "type": "string" + }, "prefixMatch": { "description": "For satisfying the matchRule condition, the request's path must begin with the specified prefixMatch. prefixMatch must begin with a /. The value must be from 1 to 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must be specified.", "type": "string" @@ -41984,7 +42951,7 @@ "type": "integer" }, "requestPath": { - "description": "The request path of the HTTPS health check request. The default value is \"/\".", + "description": "The request path of the HTTPS health check request. The default value is \"/\". 
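Restating the pathTemplateMatch constraints above in one place: the pattern is matched against the :path header with the query string removed, must be 1 to 1024 characters, must start with "/", may contain at most 5 operators, and is mutually exclusive with prefixMatch, fullPathMatch, and regexMatch. A route-rule sketch, assuming a Go client generated from this discovery revision; the template shown is illustrative:

```go
package sketch

import compute "google.golang.org/api/compute/v1"

// pathTemplateRule builds a URL map route rule that selects a backend via a
// path template. Exactly one match style is set, per the schema above.
func pathTemplateRule(backendServiceURL string) *compute.HttpRouteRule {
	return &compute.HttpRouteRule{
		Priority: 1,
		MatchRules: []*compute.HttpRouteRuleMatch{
			{
				// Two operators, a leading slash, well under 1024 characters.
				PathTemplateMatch: "/buckets/{bucket}/objects/{object=**}",
			},
		},
		Service: backendServiceURL,
	}
}
```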
Must comply with RFC3986.", "type": "string" }, "selfLink": { @@ -42165,7 +43132,7 @@ "type": "string" }, "family": { - "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035.", + "description": "The name of the image family to which this image belongs. The image family name can be from a publicly managed image family provided by Compute Engine, or from a custom image family you create. For example, centos-stream-9 is a publicly available image family. For more information, see Image family best practices. When creating disks, you can specify an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035.", "type": "string" }, "guestOsFeatures": { @@ -42605,11 +43572,6 @@ "type": "string" }, "machineType": { - "annotations": { - "required": [ - "compute.instances.insert" - ] - }, "description": "Full or partial URL of the machine type resource to use for this instance, in the format: zones/zone/machineTypes/machine-type. This is provided by the client when the instance is created. For example, the following is a valid partial url to a predefined machine type: zones/us-central1-f/machineTypes/n1-standard-1 To create a custom machine type, provide a URL to a machine type in the following format, where CPUS is 1 or an even number up to 32 (2, 4, 6, ... 24, etc), and MEMORY is the total memory for this instance. Memory must be a multiple of 256 MB and must be supplied in MB (e.g. 5 GB of memory is 5120 MB): zones/zone/machineTypes/custom-CPUS-MEMORY For example: zones/us-central1-f/machineTypes/custom-4-5120 For a full list of restrictions, read the Specifications for custom machine types.", "type": "string" }, @@ -42726,7 +43688,7 @@ "TERMINATED" ], "enumDescriptions": [ - "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", "Resources are being allocated for the instance.", "The instance is in repair.", "The instance is running.", @@ -43588,7 +44550,7 @@ "type": "string" }, "initialDelaySec": { - "description": "The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. This initial delay allows instances to initialize and run their startup scripts before the instance group determines that they are UNHEALTHY. This prevents the managed instance group from recreating its instances prematurely. This value must be from range [0, 3600].", + "description": "The initial delay is the number of seconds that a new VM takes to initialize and run its startup script. During a VM's initial delay period, the MIG ignores unsuccessful health checks because the VM might be in the startup process. This prevents the MIG from prematurely recreating a VM. If the health check receives a healthy response during the initial delay, it indicates that the startup process is complete and the VM is ready. The value of initial delay must be between 0 and 3600 seconds. 
The default value is 0.", "format": "int32", "type": "integer" } @@ -43952,7 +44914,7 @@ "id": "InstanceGroupManagersDeleteInstancesRequest", "properties": { "instances": { - "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME].", + "description": "The URLs of one or more instances to delete. This can be a full URL or a partial URL, such as zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have URL and can be deleted only by name. One cannot specify both URLs and names in a single request.", "items": { "type": "string" }, @@ -45103,6 +46065,10 @@ "$ref": "InstanceProperties", "description": "The instance properties for this instance template." }, + "region": { + "description": "[Output Only] URL of the region where the instance template resides. Only applicable for regional resources.", + "type": "string" + }, "selfLink": { "description": "[Output Only] The URL for this instance template. The server defines this URL.", "type": "string" @@ -45118,6 +46084,127 @@ }, "type": "object" }, + "InstanceTemplateAggregatedList": { + "description": "Contains a list of InstanceTemplatesScopedList.", + "id": "InstanceTemplateAggregatedList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "additionalProperties": { + "$ref": "InstanceTemplatesScopedList", + "description": "The name of the scope that contains this set of instance templates." + }, + "description": "A list of InstanceTemplatesScopedList resources.", + "type": "object" + }, + "kind": { + "default": "compute#instanceTemplateAggregatedList", + "description": "Type of resource.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. 
For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "InstanceTemplateList": { "description": "A list of instance templates.", "id": "InstanceTemplateList", @@ -45238,6 +46325,108 @@ }, "type": "object" }, + "InstanceTemplatesScopedList": { + "id": "InstanceTemplatesScopedList", + "properties": { + "instanceTemplates": { + "description": "[Output Only] A list of instance templates that are contained within the specified project and zone.", + "items": { + "$ref": "InstanceTemplate" + }, + "type": "array" + }, + "warning": { + "description": "[Output Only] An informational warning that replaces the list of instance templates when the list is empty.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with a exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, "InstanceWithNamedPorts": { "id": "InstanceWithNamedPorts", "properties": { @@ -45267,7 +46456,7 @@ "TERMINATED" ], "enumDescriptions": [ - "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", "Resources are being allocated for the instance.", "The instance is in repair.", "The instance is running.", @@ -45522,6 +46711,20 @@ }, "type": "object" }, + "InstancesSetNameRequest": { + "id": "InstancesSetNameRequest", + "properties": { + "currentName": { + "description": "The current name of this resource, used to prevent conflicts. 
Provide the latest name when making a request to change name.", + "type": "string" + }, + "name": { + "description": "The name to be applied to the instance. Needs to be RFC 1035 compliant.", + "type": "string" + } + }, + "type": "object" + }, "InstancesSetServiceAccountRequest": { "id": "InstancesSetServiceAccountRequest", "properties": { @@ -45570,7 +46773,7 @@ "type": "object" }, "Interconnect": { - "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the GCP network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", + "description": "Represents an Interconnect resource. An Interconnect resource is a dedicated connection between the Google Cloud network and your on-premises network. For more information, read the Dedicated Interconnect Overview.", "id": "Interconnect", "properties": { "adminEnabled": { @@ -45642,6 +46845,18 @@ "description": "[Output Only] Type of the resource. Always compute#interconnect for interconnects.", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this Interconnect, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an Interconnect.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "linkType": { "description": "Type of link requested, which can take one of the following values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that this field indicates the speed of each of the links in the bundle, not the speed of the entire bundle.", "enum": [ @@ -45669,7 +46884,7 @@ "type": "string" }, "nocContactEmail": { - "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Stackdriver logs alerting and Cloud Notifications.", + "description": "Email address to contact the customer NOC for operations and maintenance notifications regarding this Interconnect. If specified, this will be used for notifications in addition to all other forms described, such as Cloud Monitoring logs alerting and Cloud Notifications. This field is required for users who sign up for Cloud Interconnect using workforce identity federation.", "type": "string" }, "operationalStatus": { @@ -45693,13 +46908,17 @@ "format": "int32", "type": "integer" }, + "remoteLocation": { + "description": "Indicates that this is a Cross-Cloud Interconnect. 
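The InstancesSetNameRequest above is a compare-and-set: currentName must match the instance's present name, which keeps a stale rename from racing a concurrent one. A sketch, assuming the corresponding instances.setName method is present in the generated Go client; all names are placeholders:

```go
package sketch

import (
	"context"

	compute "google.golang.org/api/compute/v1"
)

// renameInstance renames a VM, guarding against concurrent renames by
// supplying the name the caller believes the instance currently has.
func renameInstance(ctx context.Context, svc *compute.Service, project, zone string) (*compute.Operation, error) {
	req := &compute.InstancesSetNameRequest{
		CurrentName: "web-1",     // request fails if the instance was already renamed
		Name:        "web-1-new", // must be RFC 1035 compliant
	}
	return svc.Instances.SetName(project, zone, "web-1", req).Context(ctx).Do()
}
```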
This field specifies the location outside of Google's network that the interconnect is connected to.", + "type": "string" + }, "requestedLinkCount": { "description": "Target number of physical links in the link bundle, as requested by the customer.", "format": "int32", "type": "integer" }, "satisfiesPzs": { - "description": "[Output Only] Set to true if the resource satisfies the zone separation organization policy constraints and false otherwise. Defaults to false if the field is not present.", + "description": "[Output Only] Reserved for future use.", "type": "boolean" }, "selfLink": { @@ -45787,6 +47006,10 @@ "description": "This field is not available.", "type": "string" }, + "configurationConstraints": { + "$ref": "InterconnectAttachmentConfigurationConstraints", + "description": "[Output Only] Constraints for this attachment, if any. The attachment does not work if these constraints are not met." + }, "creationTimestamp": { "description": "[Output Only] Creation timestamp in RFC3339 text format.", "type": "string" @@ -45852,7 +47075,7 @@ "type": "string" }, "ipsecInternalAddresses": { - "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool. Not currently available publicly. ", + "description": "A list of URLs of addresses that have been reserved for the VLAN attachment. Used only for the VLAN attachment that has the encryption option as IPSEC. The addresses must be regional internal IP address ranges. When creating an HA VPN gateway over the VLAN attachment, if the attachment is configured to use a regional internal IP address, then the VPN gateway's IP address is allocated from the IP address range specified here. For example, if the HA VPN gateway's interface 0 is paired to this VLAN attachment, then a regional internal IP address for the VPN gateway interface 0 will be allocated from the IP address specified for this VLAN attachment. If this field is not specified when creating the VLAN attachment, then later on when creating an HA VPN gateway on this VLAN attachment, the HA VPN gateway's IP address is allocated from the regional external IP address pool.", "items": { "type": "string" }, @@ -45863,6 +47086,18 @@ "description": "[Output Only] Type of the resource. Always compute#interconnectAttachment for interconnect attachments.", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this InterconnectAttachment, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. 
You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an InterconnectAttachment.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "mtu": { "description": "Maximum Transmission Unit (MTU), in bytes, of packets passing through this interconnect attachment. Only 1440 and 1500 are allowed. If not specified, the value will default to 1440.", "format": "int32", @@ -45906,12 +47141,16 @@ "description": "[Output Only] URL of the region where the regional interconnect attachment resides. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" }, + "remoteService": { + "description": "[Output Only] If the attachment is on a Cross-Cloud Interconnect connection, this field contains the interconnect's remote location service provider. Example values: \"Amazon Web Services\" \"Microsoft Azure\". The field is set only for attachments on Cross-Cloud Interconnect connections. Its value is copied from the InterconnectRemoteLocation remoteService field.", + "type": "string" + }, "router": { "description": "URL of the Cloud Router to be used for dynamic routing. This router must be in the same region as this InterconnectAttachment. The InterconnectAttachment will automatically connect the Interconnect to the network \u0026 region within which the Cloud Router is configured.", "type": "string" }, "satisfiesPzs": { - "description": "[Output Only] Set to true if the resource satisfies the zone separation organization policy constraints and false otherwise. Defaults to false if the field is not present.", + "description": "[Output Only] Reserved for future use.", "type": "boolean" }, "selfLink": { @@ -45952,6 +47191,11 @@ ], "type": "string" }, + "subnetLength": { + "description": "Length of the IPv4 subnet mask. Allowed values: - 29 (default) - 30 The default value is 29, except for Cross-Cloud Interconnect connections that use an InterconnectRemoteLocation with a constraints.subnetLengthRange.min equal to 30. For example, connections that use an Azure remote location fall into this category. In these cases, the default value is 30, and requesting 29 returns an error. Where both 29 and 30 are allowed, 29 is preferred, because it gives Google Cloud Support more debugging visibility. ", + "format": "int32", + "type": "integer" + }, "type": { "description": "The type of interconnect attachment this is, which can take one of the following values: - DEDICATED: an attachment to a Dedicated Interconnect. - PARTNER: an attachment to a Partner Interconnect, created by the customer. - PARTNER_PROVIDER: an attachment to a Partner Interconnect, created by the partner. ", "enum": [ @@ -45967,7 +47211,7 @@ "type": "string" }, "vlanTag8021q": { - "description": "The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. Only specified at creation time.", + "description": "The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4093. 
Only specified at creation time.", "format": "int32", "type": "integer" } @@ -46101,6 +47345,47 @@ }, "type": "object" }, + "InterconnectAttachmentConfigurationConstraints": { + "id": "InterconnectAttachmentConfigurationConstraints", + "properties": { + "bgpMd5": { + "description": "[Output Only] Whether the attachment's BGP session requires/allows/disallows BGP MD5 authentication. This can take one of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. For example, a Cross-Cloud Interconnect connection to a remote cloud provider that requires BGP MD5 authentication has the interconnectRemoteLocation attachment_configuration_constraints.bgp_md5 field set to MD5_REQUIRED, and that property is propagated to the attachment. Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 is requested.", + "enum": [ + "MD5_OPTIONAL", + "MD5_REQUIRED", + "MD5_UNSUPPORTED" + ], + "enumDescriptions": [ + "MD5_OPTIONAL: BGP MD5 authentication is supported and can optionally be configured.", + "MD5_REQUIRED: BGP MD5 authentication must be configured.", + "MD5_UNSUPPORTED: BGP MD5 authentication must not be configured" + ], + "type": "string" + }, + "bgpPeerAsnRanges": { + "description": "[Output Only] List of ASN ranges that the remote location is known to support. Formatted as an array of inclusive ranges {min: min-value, max: max-value}. For example, [{min: 123, max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or anything in the range 64512-65534. This field is only advisory. Although the API accepts other ranges, these are the ranges that we recommend.", + "items": { + "$ref": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange" + }, + "type": "array" + } + }, + "type": "object" + }, + "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange": { + "id": "InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange", + "properties": { + "max": { + "format": "uint32", + "type": "integer" + }, + "min": { + "format": "uint32", + "type": "integer" + } + }, + "type": "object" + }, "InterconnectAttachmentList": { "description": "Response to the list request, and contains a list of interconnect attachments.", "id": "InterconnectAttachmentList", @@ -46758,7 +48043,7 @@ "type": "string" }, "supportsPzs": { - "description": "[Output Only] Set to true for locations that support physical zone separation. 
Defaults to false if the field is not present.", + "description": "[Output Only] Reserved for future use.", "type": "boolean" } }, @@ -46884,111 +48169,413 @@ }, "type": "object" }, - "InterconnectLocationRegionInfo": { - "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", - "id": "InterconnectLocationRegionInfo", + "InterconnectLocationRegionInfo": { + "description": "Information about any potential InterconnectAttachments between an Interconnect at a specific InterconnectLocation, and a specific Cloud Region.", + "id": "InterconnectLocationRegionInfo", + "properties": { + "expectedRttMs": { + "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", + "format": "int64", + "type": "string" + }, + "locationPresence": { + "description": "Identifies the network presence of this location.", + "enum": [ + "GLOBAL", + "LOCAL_REGION", + "LP_GLOBAL", + "LP_LOCAL_REGION" + ], + "enumDescriptions": [ + "This region is not in any common network presence with this InterconnectLocation.", + "This region shares the same regional network presence as this InterconnectLocation.", + "[Deprecated] This region is not in any common network presence with this InterconnectLocation.", + "[Deprecated] This region shares the same regional network presence as this InterconnectLocation." + ], + "type": "string" + }, + "region": { + "description": "URL for the region of this location.", + "type": "string" + } + }, + "type": "object" + }, + "InterconnectOutageNotification": { + "description": "Description of a planned outage on this Interconnect.", + "id": "InterconnectOutageNotification", + "properties": { + "affectedCircuits": { + "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", + "items": { + "type": "string" + }, + "type": "array" + }, + "description": { + "description": "A description about the purpose of the outage.", + "type": "string" + }, + "endTime": { + "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", + "format": "int64", + "type": "string" + }, + "issueType": { + "description": "Form this outage is expected to take, which can take one of the following values: - OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", + "enum": [ + "IT_OUTAGE", + "IT_PARTIAL_OUTAGE", + "OUTAGE", + "PARTIAL_OUTAGE" + ], + "enumDescriptions": [ + "[Deprecated] The Interconnect may be completely out of service for some or all of the specified window.", + "[Deprecated] Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth.", + "The Interconnect may be completely out of service for some or all of the specified window.", + "Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth." 
+ ], + "type": "string" + }, + "name": { + "description": "Unique identifier for this outage notification.", + "type": "string" + }, + "source": { + "description": "The party that generated this notification, which can take the following value: - GOOGLE: this notification was generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", + "enum": [ + "GOOGLE", + "NSRC_GOOGLE" + ], + "enumDescriptions": [ + "This notification was generated by Google.", + "[Deprecated] This notification was generated by Google." + ], + "type": "string" + }, + "startTime": { + "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", + "format": "int64", + "type": "string" + }, + "state": { + "description": "State of this notification, which can take one of the following values: - ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. - CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. - COMPLETED: The outage associated with this notification is complete. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", + "enum": [ + "ACTIVE", + "CANCELLED", + "COMPLETED", + "NS_ACTIVE", + "NS_CANCELED" + ], + "enumDescriptions": [ + "This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", + "The outage associated with this notification was cancelled before the outage was due to start.", + "The outage associated with this notification is complete.", + "[Deprecated] This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", + "[Deprecated] The outage associated with this notification was canceled before the outage was due to start." + ], + "type": "string" + } + }, + "type": "object" + }, + "InterconnectRemoteLocation": { + "description": "Represents a Cross-Cloud Interconnect Remote Location resource. You can use this resource to find remote location details about an Interconnect attachment (VLAN).", + "id": "InterconnectRemoteLocation", + "properties": { + "address": { + "description": "[Output Only] The postal address of the Point of Presence, each line in the address is separated by a newline character.", + "type": "string" + }, + "attachmentConfigurationConstraints": { + "$ref": "InterconnectAttachmentConfigurationConstraints", + "description": "[Output Only] Subset of fields from InterconnectAttachment's |configurationConstraints| field that apply to all attachments for this remote location." + }, + "city": { + "description": "[Output Only] Metropolitan area designator that indicates the city in which an interconnect is located. For example: \"Chicago, IL\", \"Amsterdam, Netherlands\".", + "type": "string" + }, + "constraints": { + "$ref": "InterconnectRemoteLocationConstraints", + "description": "[Output Only] Constraints on the parameters for creating Cross-Cloud Interconnect and associated InterconnectAttachments."
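Since the outage-notification enums above keep deprecated prefixed spellings (IT_*, NS_*, NSRC_GOOGLE) alongside their replacements, and startTime/endTime are string-encoded epoch milliseconds, a client typically normalizes both before use. A sketch (normalizeOutageEnum and parseEpochMs are hypothetical helpers):

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

// normalizeOutageEnum folds the deprecated prefixed spellings onto their
// unprefixed replacements, as the descriptions above recommend.
// NS_CANCELED is special-cased because its replacement is spelled CANCELLED.
func normalizeOutageEnum(v string) string {
	switch {
	case v == "NS_CANCELED":
		return "CANCELLED"
	case v == "NSRC_GOOGLE":
		return "GOOGLE"
	case strings.HasPrefix(v, "IT_"):
		return strings.TrimPrefix(v, "IT_")
	case strings.HasPrefix(v, "NS_"):
		return strings.TrimPrefix(v, "NS_")
	}
	return v
}

// parseEpochMs converts the string-encoded startTime/endTime fields
// (milliseconds since the Unix epoch, format:int64) into time.Time.
func parseEpochMs(s string) (time.Time, error) {
	ms, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return time.Time{}, err
	}
	return time.UnixMilli(ms), nil
}

func main() {
	fmt.Println(normalizeOutageEnum("IT_PARTIAL_OUTAGE")) // PARTIAL_OUTAGE
	t, _ := parseEpochMs("1700000000000")
	fmt.Println(t.UTC())
}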
+ }, + "continent": { + "description": "[Output Only] Continent for this location, which can take one of the following values: - AFRICA - ASIA_PAC - EUROPE - NORTH_AMERICA - SOUTH_AMERICA ", + "enum": [ + "AFRICA", + "ASIA_PAC", + "EUROPE", + "NORTH_AMERICA", + "SOUTH_AMERICA" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "" + ], + "type": "string" + }, + "creationTimestamp": { + "description": "[Output Only] Creation timestamp in RFC3339 text format.", + "type": "string" + }, + "description": { + "description": "[Output Only] An optional description of the resource.", + "type": "string" + }, + "facilityProvider": { + "description": "[Output Only] The name of the provider for this facility (e.g., EQUINIX).", + "type": "string" + }, + "facilityProviderFacilityId": { + "description": "[Output Only] A provider-assigned Identifier for this facility (e.g., Ashburn-DC1).", + "type": "string" + }, + "id": { + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64", + "type": "string" + }, + "kind": { + "default": "compute#interconnectRemoteLocation", + "description": "[Output Only] Type of the resource. Always compute#interconnectRemoteLocation for interconnect remote locations.", + "type": "string" + }, + "lacp": { + "description": "[Output Only] Link Aggregation Control Protocol (LACP) constraints, which can take one of the following values: LACP_SUPPORTED, LACP_UNSUPPORTED", + "enum": [ + "LACP_SUPPORTED", + "LACP_UNSUPPORTED" + ], + "enumDescriptions": [ + "LACP_SUPPORTED: LACP is supported, and enabled by default on the Cross-Cloud Interconnect.", + "LACP_UNSUPPORTED: LACP is not supported and will not be enabled on this port. GetDiagnostics shows bundleAggregationType as \"static\". GCP does not support LAGs without LACP, so requestedLinkCount must be 1." + ], + "type": "string" + }, + "maxLagSize100Gbps": { + "description": "[Output Only] The maximum number of 100 Gbps ports supported in a link aggregation group (LAG). When linkType is 100 Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps.", + "format": "int32", + "type": "integer" + }, + "maxLagSize10Gbps": { + "description": "[Output Only] The maximum number of 10 Gbps ports supported in a link aggregation group (LAG). When linkType is 10 Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps.", + "format": "int32", + "type": "integer" + }, + "name": { + "description": "[Output Only] Name of the resource.", + "type": "string" + }, + "peeringdbFacilityId": { + "description": "[Output Only] The peeringdb identifier for this facility (corresponding with a netfac type in peeringdb).", + "type": "string" + }, + "permittedConnections": { + "description": "[Output Only] Permitted connections.", + "items": { + "$ref": "InterconnectRemoteLocationPermittedConnections" + }, + "type": "array" + }, + "remoteService": { + "description": "[Output Only] Indicates the service provider present at the remote location. Example values: \"Amazon Web Services\", \"Microsoft Azure\".", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for the resource.", + "type": "string" + }, + "status": { + "description": "[Output Only] The status of this InterconnectRemoteLocation, which can take one of the following values: - CLOSED: The InterconnectRemoteLocation is closed and is unavailable for provisioning new Cross-Cloud Interconnects.
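The lacp and maxLagSize* fields above jointly constrain requestedLinkCount on an order: without LACP only a single link is allowed, and with LACP the count is capped by the LAG limit for the chosen link speed. A minimal validation sketch, assuming a hand-rolled struct mirroring those three fields (remoteLoc and checkLinkCount are illustrative names):

package main

import "fmt"

// remoteLoc mirrors the handful of InterconnectRemoteLocation fields
// consulted when sizing a Cross-Cloud Interconnect order.
type remoteLoc struct {
	Lacp              string // "LACP_SUPPORTED" or "LACP_UNSUPPORTED"
	MaxLagSize10Gbps  int64
	MaxLagSize100Gbps int64
}

// checkLinkCount enforces the documented constraints: without LACP only a
// single link is allowed, and with LACP the requested count must not
// exceed the LAG limit for the chosen link type.
func checkLinkCount(loc remoteLoc, linkGbps int, requested int64) error {
	if loc.Lacp == "LACP_UNSUPPORTED" && requested != 1 {
		return fmt.Errorf("remote location does not support LACP; requestedLinkCount must be 1, got %d", requested)
	}
	limit := loc.MaxLagSize10Gbps
	if linkGbps == 100 {
		limit = loc.MaxLagSize100Gbps
	}
	if limit > 0 && requested > limit {
		return fmt.Errorf("requestedLinkCount %d exceeds max LAG size %d for %d Gbps links", requested, limit, linkGbps)
	}
	return nil
}

func main() {
	loc := remoteLoc{Lacp: "LACP_SUPPORTED", MaxLagSize10Gbps: 8, MaxLagSize100Gbps: 2}
	fmt.Println(checkLinkCount(loc, 100, 4)) // error: exceeds max LAG size
}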
- AVAILABLE: The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects. ", + "enum": [ + "AVAILABLE", + "CLOSED" + ], + "enumDescriptions": [ + "The InterconnectRemoteLocation is available for provisioning new Cross-Cloud Interconnects.", + "The InterconnectRemoteLocation is closed for provisioning new Cross-Cloud Interconnects." + ], + "type": "string" + } + }, + "type": "object" + }, + "InterconnectRemoteLocationConstraints": { + "id": "InterconnectRemoteLocationConstraints", + "properties": { + "portPairRemoteLocation": { + "description": "[Output Only] Port pair remote location constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to individual ports, but the UI uses this field when ordering a pair of ports, to prevent users from accidentally ordering something that is incompatible with their cloud provider. Specifically, when ordering a redundant pair of Cross-Cloud Interconnect ports, and one of them uses a remote location with portPairMatchingRemoteLocation set to matching, the UI requires that both ports use the same remote location.", + "enum": [ + "PORT_PAIR_MATCHING_REMOTE_LOCATION", + "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" + ], + "enumDescriptions": [ + "If PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider allocates ports in pairs, and the user should choose the same remote location for both ports.", + "If PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision a redundant pair of Cross-Cloud Interconnects using two different remote locations in the same city." + ], + "type": "string" + }, + "portPairVlan": { + "description": "[Output Only] Port pair VLAN constraints, which can take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, PORT_PAIR_MATCHING_VLAN", + "enum": [ + "PORT_PAIR_MATCHING_VLAN", + "PORT_PAIR_UNCONSTRAINED_VLAN" + ], + "enumDescriptions": [ + "If PORT_PAIR_MATCHING_VLAN, the Interconnect for this attachment is part of a pair of ports that should have matching VLAN allocations. This occurs with Cross-Cloud Interconnect to Azure remote locations. While GCP's API does not explicitly group pairs of ports, the UI uses this field to ensure matching VLAN ids when configuring a redundant VLAN pair.", + "PORT_PAIR_UNCONSTRAINED_VLAN means there is no constraint." + ], + "type": "string" + }, + "subnetLengthRange": { + "$ref": "InterconnectRemoteLocationConstraintsSubnetLengthRange", + "description": "[Output Only] [min-length, max-length] The minimum and maximum value (inclusive) for the IPv4 subnet length. For example, an interconnectRemoteLocation for Azure has {min: 30, max: 30} because Azure requires /30 subnets. This range specifies the values supported by both cloud providers. Interconnect currently supports /29 and /30 IPv4 subnet lengths. If a remote cloud has no constraint on IPv4 subnet length, the range would thus be {min: 29, max: 30}. 
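The portPairRemoteLocation constraint above is what the Cloud console uses to keep redundant port pairs compatible with the remote provider. A small sketch of the same client-side check (checkPortPair is a hypothetical helper):

package main

import "fmt"

// checkPortPair applies the portPairRemoteLocation constraint: when a
// remote location reports PORT_PAIR_MATCHING_REMOTE_LOCATION, both ports
// of a redundant Cross-Cloud Interconnect pair must use the same remote
// location.
func checkPortPair(constraint, remoteA, remoteB string) error {
	if constraint == "PORT_PAIR_MATCHING_REMOTE_LOCATION" && remoteA != remoteB {
		return fmt.Errorf("remote provider allocates ports in pairs: both ports must use the same remote location (got %q and %q)", remoteA, remoteB)
	}
	return nil
}

func main() {
	err := checkPortPair("PORT_PAIR_MATCHING_REMOTE_LOCATION",
		".../interconnectRemoteLocations/azure-ams-a",
		".../interconnectRemoteLocations/azure-ams-b")
	fmt.Println(err)
}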
" + } + }, + "type": "object" + }, + "InterconnectRemoteLocationConstraintsSubnetLengthRange": { + "id": "InterconnectRemoteLocationConstraintsSubnetLengthRange", + "properties": { + "max": { + "format": "int32", + "type": "integer" + }, + "min": { + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "InterconnectRemoteLocationList": { + "description": "Response to the list request, and contains a list of interconnect remote locations.", + "id": "InterconnectRemoteLocationList", + "properties": { + "id": { + "description": "[Output Only] Unique identifier for the resource; defined by the server.", + "type": "string" + }, + "items": { + "description": "A list of InterconnectRemoteLocation resources.", + "items": { + "$ref": "InterconnectRemoteLocation" + }, + "type": "array" + }, + "kind": { + "default": "compute#interconnectRemoteLocationList", + "description": "[Output Only] Type of resource. Always compute#interconnectRemoteLocationList for lists of interconnect remote locations.", + "type": "string" + }, + "nextPageToken": { + "description": "[Output Only] This token lets you get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results.", + "type": "string" + }, + "selfLink": { + "description": "[Output Only] Server-defined URL for this resource.", + "type": "string" + }, + "warning": { + "description": "[Output Only] Informational warning message.", + "properties": { + "code": { + "description": "[Output Only] A warning code, if applicable. For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DEPRECATED_TYPE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "EXPERIMENTAL_TYPE_USED", + "EXTERNAL_API_WARNING", + "FIELD_VALUE_OVERRIDEN", + "INJECTED_KERNELS_DEPRECATED", + "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", + "LARGE_DEPLOYMENT_WARNING", + "MISSING_TYPE_DEPENDENCY", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "PARTIAL_SUCCESS", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", + "RESOURCE_NOT_DELETED", + "SCHEMA_VALIDATION_IGNORED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNDECLARED_PROPERTIES", + "UNREACHABLE" + ], + "enumDescriptions": [ + "Warning about failed cleanup of transient changes made by a failed operation.", + "A link to a deprecated resource was created.", + "When deploying and at least one of the resources has a type marked as deprecated", + "The user created a boot disk that is larger than image size.", + "When deploying and at least one of the resources has a type marked as experimental", + "Warning that is present in an external api call", + "Warning that value of a field has been overridden. 
Deprecated unused field.", + "The operation involved use of an injected kernel, which is deprecated.", + "A WEIGHTED_MAGLEV backend service is associated with a health check that is not of type HTTP/HTTPS/HTTP2.", + "When deploying a deployment with an exceedingly large number of resources", + "A resource depends on a missing type", + "The route's nextHopIp address is not assigned to an instance on the network.", + "The route's next hop instance cannot ip forward.", + "The route's nextHopInstance URL refers to an instance that does not have an ipv6 interface on the same network as the route.", + "The route's nextHopInstance URL refers to an instance that does not exist.", + "The route's nextHopInstance URL refers to an instance that is not on the same network as the route.", + "The route's next hop instance does not have a status of RUNNING.", + "Error which is not critical. We decided to continue the process despite the mentioned error.", + "No results are present on a particular list page.", + "Success is reported, but some results may be missing due to errors", + "The user attempted to use a resource that requires a TOS they have not accepted.", + "Warning that a resource is in use.", + "One or more of the resources set to auto-delete could not be deleted because they were in use.", + "When a resource schema validation is ignored.", + "Instance template used in instance group manager is valid as such, but its application does not make a lot of sense, because it allows only single instance in instance group.", + "When undeclared properties in the schema are present", + "A given scope cannot be reached." + ], + "type": "string" + }, + "data": { + "description": "[Output Only] Metadata about this warning in key: value format. For example: \"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" } ", + "items": { + "properties": { + "key": { + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name.
Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding).", + "type": "string" + }, + "value": { + "description": "[Output Only] A warning data value corresponding to the key.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "[Output Only] A human-readable description of the warning code.", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "InterconnectRemoteLocationPermittedConnections": { + "id": "InterconnectRemoteLocationPermittedConnections", "properties": { - "expectedRttMs": { - "description": "Expected round-trip time in milliseconds, from this InterconnectLocation to a VM in this region.", - "format": "int64", - "type": "string" - }, - "locationPresence": { - "description": "Identifies the network presence of this location.", - "enum": [ - "GLOBAL", - "LOCAL_REGION", - "LP_GLOBAL", - "LP_LOCAL_REGION" - ], - "enumDescriptions": [ - "This region is not in any common network presence with this InterconnectLocation.", - "This region shares the same regional network presence as this InterconnectLocation.", - "[Deprecated] This region is not in any common network presence with this InterconnectLocation.", - "[Deprecated] This region shares the same regional network presence as this InterconnectLocation." - ], - "type": "string" - }, - "region": { - "description": "URL for the region of this location.", - "type": "string" - } - }, - "type": "object" - }, - "InterconnectOutageNotification": { - "description": "Description of a planned outage on this Interconnect.", - "id": "InterconnectOutageNotification", - "properties": { - "affectedCircuits": { - "description": "If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be affected.", - "items": { - "type": "string" - }, - "type": "array" - }, - "description": { - "description": "A description about the purpose of the outage.", - "type": "string" - }, - "endTime": { - "description": "Scheduled end time for the outage (milliseconds since Unix epoch).", - "format": "int64", - "type": "string" - }, - "issueType": { - "description": "Form this outage is expected to take, which can take one of the following values: - OUTAGE: The Interconnect may be completely out of service for some or all of the specified window. - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain up, but with reduced bandwidth. Note that the versions of this enum prefixed with \"IT_\" have been deprecated in favor of the unprefixed values.", - "enum": [ - "IT_OUTAGE", - "IT_PARTIAL_OUTAGE", - "OUTAGE", - "PARTIAL_OUTAGE" - ], - "enumDescriptions": [ - "[Deprecated] The Interconnect may be completely out of service for some or all of the specified window.", - "[Deprecated] Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth.", - "The Interconnect may be completely out of service for some or all of the specified window.", - "Some circuits comprising the Interconnect will be out of service during the expected window. The interconnect as a whole should remain up, albeit with reduced bandwidth." 
- ], - "type": "string" - }, - "name": { - "description": "Unique identifier for this outage notification.", - "type": "string" - }, - "source": { - "description": "The party that generated this notification, which can take the following value: - GOOGLE: this notification as generated by Google. Note that the value of NSRC_GOOGLE has been deprecated in favor of GOOGLE.", - "enum": [ - "GOOGLE", - "NSRC_GOOGLE" - ], - "enumDescriptions": [ - "This notification was generated by Google.", - "[Deprecated] This notification was generated by Google." - ], - "type": "string" - }, - "startTime": { - "description": "Scheduled start time for the outage (milliseconds since Unix epoch).", - "format": "int64", - "type": "string" - }, - "state": { - "description": "State of this notification, which can take one of the following values: - ACTIVE: This outage notification is active. The event could be in the past, present, or future. See start_time and end_time for scheduling. - CANCELLED: The outage associated with this notification was cancelled before the outage was due to start. - COMPLETED: The outage associated with this notification is complete. Note that the versions of this enum prefixed with \"NS_\" have been deprecated in favor of the unprefixed values.", - "enum": [ - "ACTIVE", - "CANCELLED", - "COMPLETED", - "NS_ACTIVE", - "NS_CANCELED" - ], - "enumDescriptions": [ - "This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", - "The outage associated with this notification was cancelled before the outage was due to start.", - "The outage associated with this notification is complete.", - "[Deprecated] This outage notification is active. The event could be in the future, present, or past. See start_time and end_time for scheduling.", - "[Deprecated] The outage associated with this notification was canceled before the outage was due to start." - ], + "interconnectLocation": { + "description": "[Output Only] URL of an Interconnect location that is permitted to connect to this Interconnect remote location.", "type": "string" } }, @@ -47731,7 +49318,7 @@ "type": "integer" }, "guestAcceleratorType": { - "description": "The accelerator type resource name, not a full URL, e.g. 'nvidia-tesla-k80'.", + "description": "The accelerator type resource name, not a full URL, e.g. nvidia-tesla-t4.", "type": "string" } }, @@ -48238,7 +49825,7 @@ "TERMINATED" ], "enumDescriptions": [ - "The Nanny is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", + "The instance is halted and we are performing tear down tasks like network deprogramming, releasing quota, IP, tearing down disks etc.", "Resources are being allocated for the instance.", "The instance is in repair.", "The instance is running.", @@ -48503,7 +50090,7 @@ "type": "string" }, "gatewayIPv4": { - "description": "[Output Only] The gateway address for default routing out of the network, selected by GCP.", + "description": "[Output Only] The gateway address for default routing out of the network, selected by Google Cloud.", "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}", "type": "string" }, @@ -48610,7 +50197,7 @@ "type": "string" }, "fingerprint": { - "description": "[Output Only] Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", + "description": "Fingerprint of this resource. 
A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch.", "format": "byte", "type": "string" }, @@ -48635,7 +50222,7 @@ "type": "string" }, "network": { - "description": "[Output Only] The URL of the network which the Network Attachment belongs to.", + "description": "[Output Only] The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks.", "type": "string" }, "producerAcceptLists": { @@ -48799,7 +50386,7 @@ "id": "NetworkAttachmentConnectedEndpoint", "properties": { "ipAddress": { - "description": "The IP address assigned to the producer instance network interface. This value will be a range in case of Serverless.", + "description": "The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless.", "type": "string" }, "projectIdOrNum": { @@ -48807,7 +50394,7 @@ "type": "string" }, "secondaryIpCidrRanges": { - "description": "Alias IP ranges from the same subnetwork", + "description": "Alias IP ranges from the same subnetwork.", "items": { "type": "string" }, @@ -50142,7 +51729,7 @@ "type": "string" }, "ipv6Address": { - "description": "An IPv6 internal network address for this network interface.", + "description": "An IPv6 internal network address for this network interface. To use a static internal IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork.", "type": "string" }, "kind": { @@ -50158,6 +51745,10 @@ "description": "URL of the VPC network resource for this instance. When creating an instance, if neither the network nor the subnetwork is specified, the default network global/networks/default is used. If the selected project doesn't have the default network, you must specify a network or subnet. If the network is not specified but the subnetwork is specified, the network is inferred. If you specify this property, you can specify the network as a full or partial URL. For example, the following are all valid URLs: - https://www.googleapis.com/compute/v1/projects/project/global/networks/ network - projects/project/global/networks/network - global/networks/default ", "type": "string" }, + "networkAttachment": { + "description": "The URL of the network attachment that this interface should connect to in the following format: projects/{project_number}/regions/{region_name}/networkAttachments/{network_attachment_name}.", + "type": "string" + }, "networkIP": { "description": "An IPv4 internal IP address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system.", "type": "string" @@ -50182,7 +51773,7 @@ "type": "integer" }, "stackType": { - "description": "The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used. This field can be both set at instance creation and update network interface operations.", + "description": "The stack type for this network interface. To assign only IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 addresses, use IPV4_IPV6. 
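The fingerprint field above implements optimistic locking: every patch must echo the fingerprint read from the most recent get, and the server rejects the write if the object changed in between. A sketch of the read-modify-write cycle for the NetworkAttachment accept list, with the transport abstracted behind hypothetical get/patch callbacks (nothing here is a generated-client method):

package main

import "fmt"

// attachment mirrors the two NetworkAttachment fields involved in
// optimistic locking: the mutable accept list and the fingerprint that
// must be echoed back on patch.
type attachment struct {
	Fingerprint         string
	ProducerAcceptLists []string
}

// addAcceptedProject fetches the current object, mutates it, and patches
// with the fingerprint just read, so a concurrent writer causes the patch
// to fail with a conflict instead of being silently overwritten.
func addAcceptedProject(get func() (attachment, error),
	patch func(attachment) error, project string) error {
	cur, err := get()
	if err != nil {
		return err
	}
	cur.ProducerAcceptLists = append(cur.ProducerAcceptLists, project)
	// cur.Fingerprint still holds the value from get; the server rejects
	// the patch if the object changed in the meantime.
	return patch(cur)
}

func main() {
	stored := attachment{Fingerprint: "abc123"}
	err := addAcceptedProject(
		func() (attachment, error) { return stored, nil },
		func(a attachment) error { stored = a; return nil },
		"my-consumer-project")
	fmt.Println(err, stored.ProducerAcceptLists)
}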
If not specified, IPV4_ONLY is used. This field can be both set at instance creation and update network interface operations.", "enum": [ "IPV4_IPV6", "IPV4_ONLY" @@ -51271,6 +52862,19 @@ }, "type": "object" }, + "NodeGroupsSimulateMaintenanceEventRequest": { + "id": "NodeGroupsSimulateMaintenanceEventRequest", + "properties": { + "nodes": { + "description": "Names of the nodes to go under maintenance simulation.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, "NodeTemplate": { "description": "Represent a sole-tenant Node Template resource. You can use a template to define properties for nodes in a node group. For more information, read Creating node groups and instances.", "id": "NodeTemplate", @@ -51336,7 +52940,7 @@ }, "nodeTypeFlexibility": { "$ref": "NodeTemplateNodeTypeFlexibility", - "description": "The flexible properties of the desired node type. Node groups that use this node template will create nodes of a type that matches these properties. This field is mutually exclusive with the node_type property; you can only define one or the other, but not both." + "description": "Do not use. Instead, use the node_type property." }, "region": { "description": "[Output Only] The name of the region where the node template resides, such as us-central1.", @@ -54737,6 +56341,7 @@ "COMMITTED_NVIDIA_A100_80GB_GPUS", "COMMITTED_NVIDIA_A100_GPUS", "COMMITTED_NVIDIA_K80_GPUS", + "COMMITTED_NVIDIA_L4_GPUS", "COMMITTED_NVIDIA_P100_GPUS", "COMMITTED_NVIDIA_P4_GPUS", "COMMITTED_NVIDIA_T4_GPUS", @@ -54785,13 +56390,18 @@ "N2D_CPUS", "N2_CPUS", "NETWORKS", + "NETWORK_ATTACHMENTS", "NETWORK_ENDPOINT_GROUPS", "NETWORK_FIREWALL_POLICIES", + "NET_LB_SECURITY_POLICIES_PER_REGION", + "NET_LB_SECURITY_POLICY_RULES_PER_REGION", + "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION", "NODE_GROUPS", "NODE_TEMPLATES", "NVIDIA_A100_80GB_GPUS", "NVIDIA_A100_GPUS", "NVIDIA_K80_GPUS", + "NVIDIA_L4_GPUS", "NVIDIA_P100_GPUS", "NVIDIA_P100_VWS_GPUS", "NVIDIA_P4_GPUS", @@ -54806,6 +56416,7 @@ "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS", "PREEMPTIBLE_NVIDIA_A100_GPUS", "PREEMPTIBLE_NVIDIA_K80_GPUS", + "PREEMPTIBLE_NVIDIA_L4_GPUS", "PREEMPTIBLE_NVIDIA_P100_GPUS", "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS", "PREEMPTIBLE_NVIDIA_P4_GPUS", @@ -54829,6 +56440,7 @@ "ROUTES", "SECURITY_POLICIES", "SECURITY_POLICIES_PER_REGION", + "SECURITY_POLICY_ADVANCED_RULES_PER_REGION", "SECURITY_POLICY_CEVAL_RULES", "SECURITY_POLICY_RULES", "SECURITY_POLICY_RULES_PER_REGION", @@ -54886,6 +56498,7 @@ "", "", "", + "", "Guest CPUs", "", "", @@ -54976,6 +56589,13 @@ "", "", "", + "", + "", + "", + "", + "", + "", + "", "The total number of snapshots allowed for a single project.", "", "", @@ -55129,6 +56749,20 @@ }, "type": "object" }, + "RegionAddressesMoveRequest": { + "id": "RegionAddressesMoveRequest", + "properties": { + "description": { + "description": "An optional destination address description if intended to be different from the source.", + "type": "string" + }, + "destinationAddress": { + "description": "The URL of the destination address to move to. This can be a full or partial URL. For example, the following are all valid URLs to a address: - https://www.googleapis.com/compute/v1/projects/project/regions/region /addresses/address - projects/project/regions/region/addresses/address Note that destination project must be different from the source project. 
So /regions/region/addresses/address is not valid partial url.", + "type": "string" + } + }, + "type": "object" + }, "RegionAutoscalerList": { "description": "Contains a list of autoscalers.", "id": "RegionAutoscalerList", @@ -56416,6 +58050,10 @@ "description": "Resource policies to be added to this reservation. The key is defined by user, and the value is resource policy url. This is to define placement policy with reservation.", "type": "object" }, + "resourceStatus": { + "$ref": "AllocationResourceStatus", + "description": "[Output Only] Status information for Reservation resource." + }, "satisfiesPzs": { "description": "[Output Only] Reserved for future use.", "type": "boolean" @@ -57289,7 +58927,7 @@ "type": "string" }, "timeZone": { - "description": "Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database.", + "description": "Specifies the time zone to be used in interpreting Schedule.schedule. The value of this field must be a time zone name from the tz database: https://wikipedia.org/wiki/Tz_database.", "type": "string" }, "vmStartSchedule": { @@ -58298,7 +59936,7 @@ "type": "string" }, "advertisedGroups": { - "description": "User-specified list of prefix groups to advertise in custom mode, which can take one of the following options: - ALL_SUBNETS: Advertises all available subnets, including peer VPC subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", + "description": "User-specified list of prefix groups to advertise in custom mode, which currently supports the following option: - ALL_SUBNETS: Advertises all of the router's own VPC subnets. This excludes any routes learned for subnets that use VPC Network Peering. Note that this field can only be populated if advertise_mode is CUSTOM and overrides the list defined for the router (in the \"bgp\" message). These groups are advertised in addition to any specified prefixes. Leave this field blank to advertise no custom groups.", "items": { "enum": [ "ALL_SUBNETS" @@ -58722,7 +60360,7 @@ "type": "array" }, "sourceSubnetworkIpRangesToNat": { - "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any other Router.Nat section in any Router for this network in this region.", + "description": "Specify the Nat option, which can take one of the following values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every Subnetwork are allowed to Nat. - ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges in every Subnetwork are allowed to Nat. 
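The RegionAddressesMoveRequest rules above (the destination must name a different project, so a project-less partial URL is invalid) are easy to enforce client-side before issuing the move. A sketch using a hand-written struct that mirrors the schema (moveRequest and newMoveRequest are illustrative names):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// moveRequest mirrors RegionAddressesMoveRequest.
type moveRequest struct {
	Description        string `json:"description,omitempty"`
	DestinationAddress string `json:"destinationAddress"`
}

// newMoveRequest builds the request body for moving an address to another
// project, enforcing the documented constraints.
func newMoveRequest(srcProject, dstURL, desc string) (*moveRequest, error) {
	if !strings.Contains(dstURL, "projects/") {
		return nil, fmt.Errorf("destination %q must name a project; /regions/... alone is not a valid partial URL", dstURL)
	}
	if strings.Contains(dstURL, "projects/"+srcProject+"/") {
		return nil, fmt.Errorf("destination project must differ from source project %q", srcProject)
	}
	return &moveRequest{Description: desc, DestinationAddress: dstURL}, nil
}

func main() {
	req, err := newMoveRequest("proj-a",
		"projects/proj-b/regions/us-central1/addresses/web-ip", "moved from proj-a")
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(req)
	fmt.Println(string(b))
}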
- LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to Nat (specified in the field subnetwork below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then there should not be any other Router.Nat section in any Router for this network in this region.", "enum": [ "ALL_SUBNETWORKS_ALL_IP_RANGES", "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES", @@ -59932,6 +61570,18 @@ "description": "[Output only] Type of the resource. Always compute#securityPolicyfor security policies", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this security policy, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels. To see the latest fingerprint, make get() request to the security policy.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "name": { "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", @@ -59956,7 +61606,7 @@ "type": "string" }, "type": { - "description": "The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. They filter requests before the request is served from the application. This field can be set only at resource creation time.", + "description": "The type indicates the intended use of the security policy. - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. - CLOUD_ARMOR_INTERNAL_SERVICE: Cloud Armor internal service policies can be configured to filter HTTP requests targeting services managed by Traffic Director in a service mesh. 
They filter requests before the request is served from the application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. This field can be set only at resource creation time.", "enum": [ "CLOUD_ARMOR", "CLOUD_ARMOR_EDGE", @@ -59984,15 +61634,15 @@ "type": "object" }, "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig": { - "description": "Configuration options for L7 DDoS detection.", + "description": "Configuration options for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", "id": "SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig", "properties": { "enable": { - "description": "If set to true, enables CAAP for L7 DDoS detection.", + "description": "If set to true, enables CAAP for L7 DDoS detection. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", "type": "boolean" }, "ruleVisibility": { - "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules.", + "description": "Rule visibility can be one of the following: STANDARD - opaque rules. (default) PREMIUM - transparent rules. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", "enum": [ "PREMIUM", "STANDARD" @@ -60187,7 +61837,7 @@ "id": "SecurityPolicyRecaptchaOptionsConfig", "properties": { "redirectSiteKey": { - "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used.", + "description": "An optional field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. If not specified, a Google-managed site key is used. This field is only supported in Global Security Policies of type CLOUD_ARMOR.", "type": "string" } }, @@ -60207,7 +61857,7 @@ "id": "SecurityPolicyRule", "properties": { "action": { - "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. - deny(): deny access to target, returns the HTTP response code specified (valid values are 403, 404, and 502). - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", + "description": "The Action to perform when the rule is matched. The following are the valid actions: - allow: allow access to target. 
- deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for `STATUS` are 403, 404, and 502. - rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rate_limit_options to be set. - redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. - throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rate_limit_options to be set for this. ", "type": "string" }, "description": { @@ -60216,7 +61866,7 @@ }, "headerAction": { "$ref": "SecurityPolicyRuleHttpHeaderAction", - "description": "Optional, additional actions that are performed on headers." + "description": "Optional, additional actions that are performed on headers. This field is only supported in Global Security Policies of type CLOUD_ARMOR." }, "kind": { "default": "compute#securityPolicyRule", @@ -60227,6 +61877,10 @@ "$ref": "SecurityPolicyRuleMatcher", "description": "A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding 'action' is enforced." }, + "preconfiguredWafConfig": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfig", + "description": "Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect." + }, "preview": { "description": "If set to true, the specified action is not enforced.", "type": "boolean" @@ -60242,7 +61896,7 @@ }, "redirectOptions": { "$ref": "SecurityPolicyRuleRedirectOptions", - "description": "Parameters defining the redirect action. Cannot be specified for any other actions." + "description": "Parameters defining the redirect action. Cannot be specified for any other actions. This field is only supported in Global Security Policies of type CLOUD_ARMOR." } }, "type": "object" @@ -60284,7 +61938,7 @@ }, "expr": { "$ref": "Expr", - "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header." + "description": "User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. Expressions containing `evaluateThreatIntelligence` require Cloud Armor Managed Protection Plus tier and are not supported in Edge Policies nor in Regional Policies. Expressions containing `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor Managed Protection Plus tier and are only supported in Global Security Policies." }, "versionedExpr": { "description": "Preconfigured versioned expression. If this field is specified, config must also be specified. 
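Because the rule action is a plain string with a small set of valid deny() status codes, a helper that renders and validates it avoids typo-prone literals. A minimal sketch (denyAction is a hypothetical helper; 403, 404 and 502 are the values the description above permits):

package main

import "fmt"

// denyAction renders the action string for a Cloud Armor deny rule,
// restricting the status code to the documented set.
func denyAction(status int) (string, error) {
	switch status {
	case 403, 404, 502:
		return fmt.Sprintf("deny(%d)", status), nil
	default:
		return "", fmt.Errorf("deny() supports 403, 404 or 502, got %d", status)
	}
}

func main() {
	a, _ := denyAction(403)
	fmt.Println(a) // deny(403)
}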
Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding src_ip_range field in config.", @@ -60312,6 +61966,92 @@ }, "type": "object" }, + "SecurityPolicyRulePreconfiguredWafConfig": { + "id": "SecurityPolicyRulePreconfiguredWafConfig", + "properties": { + "exclusions": { + "description": "A list of exclusions to apply during preconfigured WAF evaluation.", + "items": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfigExclusion" + }, + "type": "array" + } + }, + "type": "object" + }, + "SecurityPolicyRulePreconfiguredWafConfigExclusion": { + "id": "SecurityPolicyRulePreconfiguredWafConfigExclusion", + "properties": { + "requestCookiesToExclude": { + "description": "A list of request cookie names whose value will be excluded from inspection during preconfigured WAF evaluation.", + "items": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams" + }, + "type": "array" + }, + "requestHeadersToExclude": { + "description": "A list of request header names whose value will be excluded from inspection during preconfigured WAF evaluation.", + "items": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams" + }, + "type": "array" + }, + "requestQueryParamsToExclude": { + "description": "A list of request query parameter names whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body.", + "items": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams" + }, + "type": "array" + }, + "requestUrisToExclude": { + "description": "A list of request URIs from the request line to be excluded from inspection during preconfigured WAF evaluation. When specifying this field, the query or fragment part should be excluded.", + "items": { + "$ref": "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams" + }, + "type": "array" + }, + "targetRuleIds": { + "description": "A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. If omitted, it refers to all the rule IDs under the WAF rule set.", + "items": { + "type": "string" + }, + "type": "array" + }, + "targetRuleSet": { + "description": "Target WAF rule set to apply the preconfigured WAF exclusion.", + "type": "string" + } + }, + "type": "object" + }, + "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams": { + "id": "SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams", + "properties": { + "op": { + "description": "The match operator for the field.", + "enum": [ + "CONTAINS", + "ENDS_WITH", + "EQUALS", + "EQUALS_ANY", + "STARTS_WITH" + ], + "enumDescriptions": [ + "The operator matches if the field value contains the specified value.", + "The operator matches if the field value ends with the specified value.", + "The operator matches if the field value equals the specified value.", + "The operator matches if the field value is any value.", + "The operator matches if the field value starts with the specified value." + ], + "type": "string" + }, + "val": { + "description": "The value of the field.", + "type": "string" + } + }, + "type": "object" + }, "SecurityPolicyRuleRateLimitOptions": { "id": "SecurityPolicyRuleRateLimitOptions", "properties": { @@ -60352,17 +62092,24 @@ ], "type": "string" }, + "enforceOnKeyConfigs": { + "description": "If specified, any combination of values of enforce_on_key_type/enforce_on_key_name is treated as the key on which ratelimit threshold/action is enforced. 
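A preconfigured WAF exclusion, as defined by the schemas above, names a target rule set (optionally narrowed to specific rule IDs) plus the request fields to skip during inspection. A sketch that marshals one such exclusion, using hand-written mirrors of the JSON properties and illustrative rule-set/rule-ID strings (none of these types come from a generated client):

package main

import (
	"encoding/json"
	"fmt"
)

// fieldParams mirrors SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams.
type fieldParams struct {
	Op  string `json:"op"`
	Val string `json:"val,omitempty"`
}

// exclusion mirrors the subset of
// SecurityPolicyRulePreconfiguredWafConfigExclusion used here.
type exclusion struct {
	TargetRuleSet           string        `json:"targetRuleSet"`
	TargetRuleIds           []string      `json:"targetRuleIds,omitempty"`
	RequestCookiesToExclude []fieldParams `json:"requestCookiesToExclude,omitempty"`
}

func main() {
	// Skip inspecting a "session" cookie for two (illustrative) rule IDs;
	// omitting TargetRuleIds would apply to every rule in the set instead.
	ex := exclusion{
		TargetRuleSet:           "sqli-v33-stable",
		TargetRuleIds:           []string{"owasp-crs-v030301-id942110-sqli"},
		RequestCookiesToExclude: []fieldParams{{Op: "EQUALS", Val: "session"}},
	}
	b, _ := json.MarshalIndent(ex, "", "  ")
	fmt.Println(string(b))
}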
You can specify up to 3 enforce_on_key_configs. If enforce_on_key_configs is specified, enforce_on_key must not be specified.", + "items": { + "$ref": "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig" + }, + "type": "array" + }, "enforceOnKeyName": { "description": "Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.", "type": "string" }, "exceedAction": { - "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are \"deny(status)\", where valid values for status are 403, 404, 429, and 502, and \"redirect\" where the redirect parameters come from exceedRedirectOptions below.", + "description": "Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. Valid options are `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, and 502, and `redirect`, where the redirect parameters come from `exceedRedirectOptions` below. The `redirect` action is only supported in Global Security Policies of type CLOUD_ARMOR.", "type": "string" }, "exceedRedirectOptions": { "$ref": "SecurityPolicyRuleRedirectOptions", - "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect." + "description": "Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR." }, "rateLimitThreshold": { "$ref": "SecurityPolicyRuleRateLimitOptionsThreshold", @@ -60371,6 +62118,40 @@ }, "type": "object" }, + "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig": { + "id": "SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig", + "properties": { + "enforceOnKeyName": { + "description": "Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.", + "type": "string" + }, + "enforceOnKeyType": { + "description": "Determines the key to enforce the rate_limit_threshold on. Possible values are: - ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if \"enforceOnKeyConfigs\" is not configured. - IP: The source IP address of the request is the key. Each IP has this limit enforced separately. - HTTP_HEADER: The value of the HTTP header whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. - XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is configured under \"enforceOnKeyName\". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. 
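The enforceOnKeyConfigs field above carries two structural rules: at most three configs, and mutual exclusion with the legacy enforce_on_key field. A small client-side validation sketch (keyConfig and validateRateLimitKeys are hypothetical names):

package main

import "fmt"

// keyConfig mirrors SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig.
type keyConfig struct {
	EnforceOnKeyType string
	EnforceOnKeyName string
}

// validateRateLimitKeys applies the two rules stated in the description:
// at most 3 enforce_on_key_configs, and enforce_on_key must be unset
// whenever configs are used.
func validateRateLimitKeys(enforceOnKey string, configs []keyConfig) error {
	if len(configs) > 3 {
		return fmt.Errorf("at most 3 enforceOnKeyConfigs allowed, got %d", len(configs))
	}
	if len(configs) > 0 && enforceOnKey != "" {
		return fmt.Errorf("enforceOnKey and enforceOnKeyConfigs are mutually exclusive")
	}
	return nil
}

func main() {
	cfgs := []keyConfig{
		{EnforceOnKeyType: "HTTP_HEADER", EnforceOnKeyName: "x-api-key"},
		{EnforceOnKeyType: "XFF_IP"},
	}
	fmt.Println(validateRateLimitKeys("", cfgs)) // <nil>
}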
- HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. - SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. - REGION_CODE: The country/region from which the request originates. ", + "enum": [ + "ALL", + "HTTP_COOKIE", + "HTTP_HEADER", + "HTTP_PATH", + "IP", + "REGION_CODE", + "SNI", + "XFF_IP" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "" + ], + "type": "string" + } + }, + "type": "object" + }, "SecurityPolicyRuleRateLimitOptionsThreshold": { "id": "SecurityPolicyRuleRateLimitOptionsThreshold", "properties": { @@ -60414,11 +62195,11 @@ "id": "SecuritySettings", "properties": { "clientTlsPolicy": { - "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", + "description": "Optional. A URL referring to a networksecurity.ClientTlsPolicy resource that describes how clients should authenticate with this service's backends. clientTlsPolicy only applies to a global BackendService with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted.", "type": "string" }, "subjectAltNames": { - "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode). Note: This field currently has no impact.", + "description": "Optional. A list of Subject Alternative Names (SANs) that the client verifies during a mutual TLS handshake with an server/endpoint for this BackendService. When the server presents its X.509 certificate to the client, the client inspects the certificate's subjectAltName field. If the field contains one of the specified values, the communication continues. Otherwise, it fails. This additional check enables the client to verify that the server is authorized to run the requested service. Note that the contents of the server certificate's subjectAltName field are configured by the Public Key Infrastructure which provisions server identities. Only applies to a global BackendService with loadBalancingScheme set to INTERNAL_SELF_MANAGED. Only applies when BackendService has an attached clientTlsPolicy with clientCertificate (mTLS mode).", "items": { "type": "string" }, @@ -60495,7 +62276,7 @@ "type": "object" }, "ServiceAttachment": { - "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. 
It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service. next tag = 20", + "description": "Represents a ServiceAttachment resource. A service attachment represents a service that a producer has exposed. It encapsulates the load balancer which fronts the service runs and a list of NAT IP ranges that the producers uses to represent the consumers connecting to the service.", "id": "ServiceAttachment", "properties": { "connectedEndpoints": { @@ -60592,6 +62373,10 @@ "$ref": "Uint128", "description": "[Output Only] An 128-bit global unique ID of the PSC service attachment." }, + "reconcileConnections": { + "description": "This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. - If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . - If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. For newly created service attachment, this boolean defaults to true.", + "type": "boolean" + }, "region": { "description": "[Output Only] URL of the region where the service attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body.", "type": "string" @@ -61226,6 +63011,7 @@ "name": { "annotations": { "required": [ + "compute.disks.createSnapshot", "compute.snapshots.insert" ] }, @@ -61461,7 +63247,7 @@ "id": "SourceInstanceParams", "properties": { "diskConfigs": { - "description": "Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, new custom images will be created from each disk. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes.", + "description": "Attached disks configuration. If not provided, defaults are applied: For boot disk and any other R/W disks, the source images for each disk will be used. For read-only disks, they will be attached in read-only mode. Local SSD disks will be created as blank volumes.", "items": { "$ref": "DiskInstantiationConfig" }, @@ -62666,7 +64452,7 @@ "type": "string" }, "enableFlowLogs": { - "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", + "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. This field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "type": "boolean" }, "externalIpv6Prefix": { @@ -62748,7 +64534,7 @@ "type": "string" }, "purpose": { - "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. 
A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", + "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "enum": [ "INTERNAL_HTTPS_LOAD_BALANCER", "PRIVATE", @@ -62770,7 +64556,7 @@ "type": "string" }, "role": { - "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", "enum": [ "ACTIVE", "BACKUP" @@ -63091,7 +64877,7 @@ "type": "string" }, "enable": { - "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled.", + "description": "Whether to enable flow logging for this subnetwork. If this field is not explicitly set, it will not appear in get listings. If not set the default behavior is determined by the org policy, if there is no org policy specified, then it will default to disabled. Flow logging isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "type": "boolean" }, "filterExpr": { @@ -63942,7 +65728,7 @@ "id": "TargetHttpsProxiesSetCertificateMapRequest", "properties": { "certificateMap": { - "description": "URL of the Certificate Map to associate with this TargetHttpsProxy.", + "description": "URL of the Certificate Map to associate with this TargetHttpsProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", "type": "string" } }, @@ -63990,7 +65776,7 @@ "type": "string" }, "certificateMap": { - "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. 
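Per the updated purpose/role descriptions above, REGIONAL_MANAGED_PROXY is now the preferred purpose for proxy-only subnets backing regional Envoy-based load balancers, with role selecting ACTIVE versus BACKUP. A sketch of a minimal request body for such a subnet (the project, network, and CIDR values are placeholders):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Minimal proxy-only subnetwork body reflecting the schema above:
	// purpose REGIONAL_MANAGED_PROXY plus an ACTIVE role. Note that
	// enableFlowLogs is not supported for this purpose.
	subnet := map[string]any{
		"name":        "proxy-only-us-central1",
		"ipCidrRange": "10.129.0.0/23",
		"network":     "projects/my-proj/global/networks/my-vpc",
		"purpose":     "REGIONAL_MANAGED_PROXY",
		"role":        "ACTIVE",
	}
	b, _ := json.MarshalIndent(subnet, "", "  ")
	fmt.Println(string(b))
}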
This field can only be set for global target proxies. If set, sslCertificates will be ignored.", + "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", "type": "string" }, "creationTimestamp": { @@ -64048,7 +65834,7 @@ "type": "string" }, "serverTlsPolicy": { - "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left blank, communications are not encrypted. Note: This field currently has no impact.", + "description": "Optional. A URL referring to a networksecurity.ServerTlsPolicy resource that describes how the proxy should authenticate inbound traffic. serverTlsPolicy only applies to a global TargetHttpsProxy attached to globalForwardingRules with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy documentation. If left blank, communications are not encrypted.", "type": "string" }, "sslCertificates": { @@ -65247,7 +67033,7 @@ "id": "TargetSslProxiesSetCertificateMapRequest", "properties": { "certificateMap": { - "description": "URL of the Certificate Map to associate with this TargetSslProxy.", + "description": "URL of the Certificate Map to associate with this TargetSslProxy. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", "type": "string" } }, @@ -65289,7 +67075,7 @@ "id": "TargetSslProxy", "properties": { "certificateMap": { - "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored.", + "description": "URL of a certificate map that identifies a certificate map associated with the given target proxy. This field can only be set for global target proxies. If set, sslCertificates will be ignored. Accepted format is //certificatemanager.googleapis.com/projects/{project }/locations/{location}/certificateMaps/{resourceName}.", "type": "string" }, "creationTimestamp": { @@ -65933,6 +67719,18 @@ "description": "[Output Only] Type of resource. Always compute#targetVpnGateway for target VPN gateways.", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this TargetVpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a TargetVpnGateway.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. 
These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "name": { "annotations": { "required": [ @@ -66988,6 +68786,10 @@ "pathPrefixRewrite": { "description": "Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. The value must be from 1 to 1024 characters.", "type": "string" + }, + "pathTemplateRewrite": { + "description": " If specified, the pattern rewrites the URL path (based on the :path header) using the HTTP template syntax. A corresponding path_template_match must be specified. Any template variables must exist in the path_template_match field. - -At least one variable must be specified in the path_template_match field - You can omit variables from the rewritten URL - The * and ** operators cannot be matched unless they have a corresponding variable name - e.g. {format=*} or {var=**}. For example, a path_template_match of /static/{format=**} could be rewritten as /static/content/{format} to prefix /content to the URL. Variables can also be re-ordered in a rewrite, so that /{country}/{format}/{suffix=**} can be rewritten as /content/{format}/{country}/{suffix}. At least one non-empty routeRules[].matchRules[].path_template_match is required. Only one of path_prefix_rewrite or path_template_rewrite may be specified.", + "type": "string" } }, "type": "object" @@ -67025,7 +68827,7 @@ "type": "string" }, "purpose": { - "description": "The purpose of the resource. This field can be either PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created subnetwork that is reserved for Internal HTTP(S) Load Balancing. If unspecified, the purpose defaults to PRIVATE_RFC_1918. The enableFlowLogs field isn't supported with the purpose field set to INTERNAL_HTTPS_LOAD_BALANCER.", + "description": "The purpose of the resource. This field can be either PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for user-created subnets or subnets that are automatically created in auto mode networks. A subnet with purpose set to REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved for regional Envoy-based load balancers. A subnet with purpose set to PRIVATE_SERVICE_CONNECT is used to publish services using Private Service Connect. A subnet with purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used only by regional internal HTTP(S) load balancers. Note that REGIONAL_MANAGED_PROXY is the preferred setting for all regional Envoy load balancers. If unspecified, the subnet purpose defaults to PRIVATE. The enableFlowLogs field isn't supported if the subnet purpose field is set to REGIONAL_MANAGED_PROXY.", "enum": [ "INTERNAL_HTTPS_LOAD_BALANCER", "PRIVATE", @@ -67043,7 +68845,7 @@ "type": "string" }, "role": { - "description": "The role of subnetwork. Currently, this field is only used when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", + "description": "The role of subnetwork. Currently, this field is only used when purpose = REGIONAL_MANAGED_PROXY. 
The value can be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being used for Envoy-based load balancers in a region. A BACKUP subnetwork is one that is ready to be promoted to ACTIVE or is currently draining. This field can be updated with a patch request.", "enum": [ "ACTIVE", "BACKUP" @@ -67471,7 +69273,7 @@ "type": "string" }, "labelFingerprint": { - "description": "A fingerprint for the labels being applied to this VpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve an VpnGateway.", + "description": "A fingerprint for the labels being applied to this VpnGateway, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a VpnGateway.", "format": "byte", "type": "string" }, @@ -67830,7 +69632,7 @@ "type": "integer" }, "peerGatewayInterface": { - "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or GCP VPN gateway.", + "description": "The peer gateway interface this VPN tunnel is connected to, the peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", "format": "uint32", "type": "integer" }, @@ -67842,7 +69644,7 @@ "type": "object" }, "VpnGatewayStatusVpnConnection": { - "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be a external VPN gateway or GCP VPN gateway.", + "description": "A VPN connection contains all VPN tunnels connected from this VpnGateway to the same peer gateway. The peer gateway could either be an external VPN gateway or a Google Cloud VPN gateway.", "id": "VpnGatewayStatusVpnConnection", "properties": { "peerExternalGateway": { @@ -68029,6 +69831,18 @@ "description": "[Output Only] Type of resource. Always compute#vpnTunnel for VPN tunnels.", "type": "string" }, + "labelFingerprint": { + "description": "A fingerprint for the labels being applied to this VpnTunnel, which is essentially a hash of the labels set used for optimistic locking. The fingerprint is initially generated by Compute Engine and changes after every request to modify or update labels. You must always provide an up-to-date fingerprint hash in order to update or change labels, otherwise the request will fail with error 412 conditionNotMet. To see the latest fingerprint, make a get() request to retrieve a VpnTunnel.", + "format": "byte", + "type": "string" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels for this resource. These can only be added or modified by the setLabels method. Each label key/value pair must comply with RFC1035. Label values may be empty.", + "type": "object" + }, "localTrafficSelector": { "description": "Local traffic selector to use when establishing the VPN tunnel with the peer VPN gateway. 
The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges must be disjoint. Only IPv4 is supported.", "items": { @@ -68519,6 +70333,11 @@ "id": { "description": "Expression ID should uniquely identify the origin of the expression. E.g. owasp-crs-v020901-id973337 identifies Owasp core rule set version 2.9.1 rule id 973337. The ID could be used to determine the individual attack definition that has been detected. It could also be used to exclude it from the policy in case of false positive. required", "type": "string" + }, + "sensitivity": { + "description": "The sensitivity value associated with the WAF rule ID. This corresponds to the ModSecurity paranoia level, ranging from 1 to 4. 0 is reserved for opt-in only rules.", + "format": "int32", + "type": "integer" } }, "type": "object" diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index df7f8c82..47ec0a57 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -1,4 +1,4 @@ -// Copyright 2022 Google LLC. +// Copyright 2023 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -75,6 +75,7 @@ var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint +var _ = internal.Version const apiId = "compute:v1" const apiName = "compute" @@ -172,6 +173,7 @@ func New(client *http.Client) (*Service, error) { s.Instances = NewInstancesService(s) s.InterconnectAttachments = NewInterconnectAttachmentsService(s) s.InterconnectLocations = NewInterconnectLocationsService(s) + s.InterconnectRemoteLocations = NewInterconnectRemoteLocationsService(s) s.Interconnects = NewInterconnectsService(s) s.LicenseCodes = NewLicenseCodesService(s) s.Licenses = NewLicensesService(s) @@ -198,6 +200,7 @@ func New(client *http.Client) (*Service, error) { s.RegionHealthChecks = NewRegionHealthChecksService(s) s.RegionInstanceGroupManagers = NewRegionInstanceGroupManagersService(s) s.RegionInstanceGroups = NewRegionInstanceGroupsService(s) + s.RegionInstanceTemplates = NewRegionInstanceTemplatesService(s) s.RegionInstances = NewRegionInstancesService(s) s.RegionNetworkEndpointGroups = NewRegionNetworkEndpointGroupsService(s) s.RegionNetworkFirewallPolicies = NewRegionNetworkFirewallPoliciesService(s) @@ -298,6 +301,8 @@ type Service struct { InterconnectLocations *InterconnectLocationsService + InterconnectRemoteLocations *InterconnectRemoteLocationsService + Interconnects *InterconnectsService LicenseCodes *LicenseCodesService @@ -350,6 +355,8 @@ type Service struct { RegionInstanceGroups *RegionInstanceGroupsService + RegionInstanceTemplates *RegionInstanceTemplatesService + RegionInstances *RegionInstancesService RegionNetworkEndpointGroups *RegionNetworkEndpointGroupsService @@ -682,6 +689,15 @@ type InterconnectLocationsService struct { s *Service } +func NewInterconnectRemoteLocationsService(s *Service) *InterconnectRemoteLocationsService { + rs := &InterconnectRemoteLocationsService{s: s} + return rs +} + +type InterconnectRemoteLocationsService struct { + s *Service +} + func NewInterconnectsService(s *Service) *InterconnectsService { rs := &InterconnectsService{s: s} return rs @@ -916,6 +932,15 @@ type RegionInstanceGroupsService struct { s *Service } +func NewRegionInstanceTemplatesService(s *Service) *RegionInstanceTemplatesService { + rs := &RegionInstanceTemplatesService{s: s} 
+ return rs +} + +type RegionInstanceTemplatesService struct { + s *Service +} + func NewRegionInstancesService(s *Service) *RegionInstancesService { rs := &RegionInstancesService{s: s} return rs @@ -1905,31 +1930,35 @@ func (s *AcceleratorTypesScopedListWarningData) MarshalJSON() ([]byte, error) { // AccessConfig: An access configuration attached to an instance's // network interface. Only one access config per instance is supported. type AccessConfig struct { - // ExternalIpv6: The first IPv6 address of the external IPv6 range - // associated with this instance, prefix length is stored in - // externalIpv6PrefixLength in ipv6AccessConfig. The field is output - // only, an IPv6 address from a subnetwork associated with the instance - // will be allocated dynamically. + // ExternalIpv6: Applies to ipv6AccessConfigs only. The first IPv6 + // address of the external IPv6 range associated with this instance, + // prefix length is stored in externalIpv6PrefixLength in + // ipv6AccessConfig. To use a static external IP address, it must be + // unused and in the same region as the instance's zone. If not + // specified, Google Cloud will automatically assign an external IPv6 + // address from the instance's subnetwork. ExternalIpv6 string `json:"externalIpv6,omitempty"` - // ExternalIpv6PrefixLength: The prefix length of the external IPv6 - // range. + // ExternalIpv6PrefixLength: Applies to ipv6AccessConfigs only. The + // prefix length of the external IPv6 range. ExternalIpv6PrefixLength int64 `json:"externalIpv6PrefixLength,omitempty"` // Kind: [Output Only] Type of the resource. Always compute#accessConfig // for access configs. Kind string `json:"kind,omitempty"` - // Name: The name of this access configuration. The default and - // recommended name is External NAT, but you can use any arbitrary - // string, such as My external IP or Network Access. + // Name: The name of this access configuration. In accessConfigs (IPv4), + // the default and recommended name is External NAT, but you can use any + // arbitrary string, such as My external IP or Network Access. In + // ipv6AccessConfigs, the recommended name is External IPv6. Name string `json:"name,omitempty"` - // NatIP: An external IP address associated with this instance. Specify - // an unused static external IP address available to the project or - // leave this field undefined to use an IP from a shared ephemeral IP - // address pool. If you specify a static external IP address, it must - // live in the same region as the zone of the instance. + // NatIP: Applies to accessConfigs (IPv4) only. An external IP address + // associated with this instance. Specify an unused static external IP + // address available to the project or leave this field undefined to use + // an IP from a shared ephemeral IP address pool. If you specify a + // static external IP address, it must live in the same region as the + // zone of the instance. NatIP string `json:"natIP,omitempty"` // NetworkTier: This signifies the networking tier used for configuring @@ -1965,12 +1994,13 @@ type AccessConfig struct { // associated. SetPublicPtr bool `json:"setPublicPtr,omitempty"` - // Type: The type of configuration. The default and only option is - // ONE_TO_ONE_NAT. + // Type: The type of configuration. In accessConfigs (IPv4), the default + // and only option is ONE_TO_ONE_NAT. In ipv6AccessConfigs, the default + // and only option is DIRECT_IPV6.
// // Possible values: // "DIRECT_IPV6" - // "ONE_TO_ONE_NAT" (default) + // "ONE_TO_ONE_NAT" Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "ExternalIpv6") to @@ -2052,6 +2082,21 @@ type Address struct { // addresses. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // Address, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. To see the latest fingerprint, make a get() + // request to retrieve an Address. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -2827,6 +2872,68 @@ func (s *AliasIpRange) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// AllocationResourceStatus: [Output Only] Contains output only fields. +type AllocationResourceStatus struct { + // SpecificSkuAllocation: Allocation Properties of this reservation. + SpecificSkuAllocation *AllocationResourceStatusSpecificSKUAllocation `json:"specificSkuAllocation,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "SpecificSkuAllocation") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SpecificSkuAllocation") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AllocationResourceStatus) MarshalJSON() ([]byte, error) { + type NoMethod AllocationResourceStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// AllocationResourceStatusSpecificSKUAllocation: Contains Properties +// set for the reservation. +type AllocationResourceStatusSpecificSKUAllocation struct { + // SourceInstanceTemplateId: ID of the instance template used to + // populate reservation properties. + SourceInstanceTemplateId string `json:"sourceInstanceTemplateId,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "SourceInstanceTemplateId") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. 
However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "SourceInstanceTemplateId") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *AllocationResourceStatusSpecificSKUAllocation) MarshalJSON() ([]byte, error) { + type NoMethod AllocationResourceStatusSpecificSKUAllocation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk struct { // DiskSizeGb: Specifies the size of the disk in base-2 GB. DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` @@ -2928,6 +3035,17 @@ type AllocationSpecificSKUReservation struct { // InstanceProperties: The instance properties for the reservation. InstanceProperties *AllocationSpecificSKUAllocationReservedInstanceProperties `json:"instanceProperties,omitempty"` + // SourceInstanceTemplate: Specifies the instance template to create the + // reservation. If you use this field, you must exclude the + // instanceProperties field. This field is optional, and it can be a + // full or partial URL. For example, the following are all valid URLs to + // an instance template: - + // https://www.googleapis.com/compute/v1/projects/project + // /global/instanceTemplates/instanceTemplate - + // projects/project/global/instanceTemplates/instanceTemplate - + // global/instanceTemplates/instanceTemplate + SourceInstanceTemplate string `json:"sourceInstanceTemplate,omitempty"` + // ForceSendFields is a list of field names (e.g. "AssuredCount") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -3054,6 +3172,17 @@ type AttachedDisk struct { // read-write mode. Mode string `json:"mode,omitempty"` + // SavedState: For LocalSSD disks on VM Instances in STOPPED or + // SUSPENDED state, this field is set to PRESERVED if the LocalSSD data + // has been saved to a persistent location by customer request. (see the + // discard_local_ssd option on Stop/Suspend). Read-only in the api. + // + // Possible values: + // "DISK_SAVED_STATE_UNSPECIFIED" - *[Default]* Disk state has not + // been preserved. + // "PRESERVED" - Disk state has been preserved. + SavedState string `json:"savedState,omitempty"` + // ShieldedInstanceInitialState: [Output Only] shielded vm initial state // stored on disk ShieldedInstanceInitialState *InitialStateConfig `json:"shieldedInstanceInitialState,omitempty"` @@ -3177,6 +3306,18 @@ type AttachedDiskInitializeParams struct { // see the Extreme persistent disk documentation. ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + // ProvisionedThroughput: Indicates how much throughput to provision for + // the disk. This sets the number of throughput mb per second that the + // disk can handle. Values must be between 1 and 7,124. 
+ ProvisionedThroughput int64 `json:"provisionedThroughput,omitempty,string"` + + // ReplicaZones: Required for each regional disk associated with the + // instance. Specify the URLs of the zones where the disk should be + // replicated to. You must provide exactly two replica zones, and one + // zone must be the same as the instance zone. You can't use this option + // with boot disks. + ReplicaZones []string `json:"replicaZones,omitempty"` + // ResourceManagerTags: Resource manager tags to be bound to the disk. // Tag keys and values have the same definition as resource manager // tags. Keys must be in the format `tagKeys/{tag_key_id}`, and values @@ -4176,15 +4317,17 @@ func (s *AutoscalersScopedListWarningData) MarshalJSON() ([]byte, error) { // AutoscalingPolicy: Cloud Autoscaler policy. type AutoscalingPolicy struct { - // CoolDownPeriodSec: The number of seconds that the autoscaler waits - // before it starts collecting information from a new instance. This - // prevents the autoscaler from collecting information when the instance - // is initializing, during which the collected usage would not be - // reliable. The default time autoscaler waits is 60 seconds. Virtual - // machine initialization times might vary because of numerous factors. - // We recommend that you test how long an instance may take to - // initialize. To do this, create an instance and time the startup - // process. + // CoolDownPeriodSec: The number of seconds that your application takes + // to initialize on a VM instance. This is referred to as the + // initialization period (/compute/docs/autoscaler#cool_down_period). + // Specifying an accurate initialization period improves autoscaler + // decisions. For example, when scaling out, the autoscaler ignores data + // from VMs that are still initializing because those VMs might not yet + // represent normal usage of your application. The default + // initialization period is 60 seconds. Initialization periods might + // vary because of numerous factors. We recommend that you test how long + // your application takes to initialize. To do this, create a VM and + // time your application's startup process. CoolDownPeriodSec int64 `json:"coolDownPeriodSec,omitempty"` // CpuUtilization: Defines the CPU utilization policy that allows the @@ -4212,7 +4355,12 @@ type AutoscalingPolicy struct { // instances allowed. MinNumReplicas int64 `json:"minNumReplicas,omitempty"` - // Mode: Defines operating mode for this policy. + // Mode: Defines the operating mode for this policy. The following modes + // are available: - OFF: Disables the autoscaler but maintains its + // configuration. - ONLY_SCALE_OUT: Restricts the autoscaler to add VM + // instances only. - ON: Enables all autoscaler activities according to + // its policy. For more information, see "Turning off or restricting an + // autoscaler" // // Possible values: // "OFF" - Do not automatically scale the MIG in or out. The @@ -5416,12 +5564,16 @@ type BackendService struct { // "INVALID_LOAD_BALANCING_SCHEME" LoadBalancingScheme string `json:"loadBalancingScheme,omitempty"` - // LocalityLbPolicies: A list of locality load balancing policies to be - // used in order of preference. Either the policy or the customPolicy - // field should be set. Overrides any value set in the localityLbPolicy - // field. localityLbPolicies is only supported when the BackendService - // is referenced by a URL Map that is referenced by a target gRPC proxy - // that has the validateForProxyless field set to true. 
+ // LocalityLbPolicies: A list of locality load-balancing policies to be + // used in order of preference. When you use localityLbPolicies, you + // must set at least one value for either the + // localityLbPolicies[].policy or the localityLbPolicies[].customPolicy + // field. localityLbPolicies overrides any value set in the + // localityLbPolicy field. For an example of how to use this field, see + // Define a list of preferred policies. Caution: This field and its + // children are intended for use in a service mesh that includes gRPC + // clients only. Envoy proxies can't use backend services that have this + // configuration. LocalityLbPolicies []*BackendServiceLocalityLoadBalancingPolicyConfig `json:"localityLbPolicies,omitempty"` // LocalityLbPolicy: The load balancing algorithm used within the scope @@ -5473,6 +5625,16 @@ type BackendService struct { // of the requests. // "ROUND_ROBIN" - This is a simple policy in which each healthy // backend is selected in round robin order. This is the default. + // "WEIGHTED_MAGLEV" - Per-instance weighted Load Balancing via health + // check reported weights. If set, the Backend Service must configure a + // non legacy HTTP-based Health Check, and health check replies are + // expected to contain non-standard HTTP response header field + // X-Load-Balancing-Endpoint-Weight to specify the per-instance weights. + // If set, Load Balancing is weighted based on the per-instance weights + // reported in the last processed health check replies, as long as every + // instance either reported a valid weight or had UNAVAILABLE_WEIGHT. + // Otherwise, Load Balancing remains equal-weight. This option is only + // supported in Network Load Balancing. LocalityLbPolicy string `json:"localityLbPolicy,omitempty"` // LogConfig: This field denotes the logging options for the load @@ -5491,6 +5653,10 @@ type BackendService struct { // loadBalancingScheme of the backend service is INTERNAL_SELF_MANAGED. MaxStreamDuration *Duration `json:"maxStreamDuration,omitempty"` + // Metadatas: Deployment metadata associated with the resource to be set + // by a GKE hub controller and read by the backend RCTH + Metadatas map[string]string `json:"metadatas,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -6551,12 +6717,13 @@ type BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy struct { // understood by a locally installed custom policy implementation. Data string `json:"data,omitempty"` - // Name: Identifies the custom policy. The value should match the type - // the custom implementation is registered with on the gRPC clients. It - // should follow protocol buffer message naming conventions and include - // the full path (e.g. myorg.CustomLbPolicy). The maximum length is 256 - // characters. Note that specifying the same custom policy more than - // once for a backend is not a valid configuration and will be rejected. + // Name: Identifies the custom policy. The value should match the name + // of a custom implementation registered on the gRPC clients. It should + // follow protocol buffer message naming conventions and include the + // full path (for example, myorg.CustomLbPolicy). The maximum length is + // 256 characters. Do not specify the same custom policy more than once + // for a backend. If you do, the configuration is rejected. 
For an + // example of how to use this field, see Use a custom policy. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "Data") to @@ -6585,12 +6752,11 @@ func (s *BackendServiceLocalityLoadBalancingPolicyConfigCustomPolicy) MarshalJSO // BackendServiceLocalityLoadBalancingPolicyConfigPolicy: The // configuration for a built-in load balancing policy. type BackendServiceLocalityLoadBalancingPolicyConfigPolicy struct { - // Name: The name of a locality load balancer policy to be used. The - // value should be one of the predefined ones as supported by - // localityLbPolicy, although at the moment only ROUND_ROBIN is - // supported. This field should only be populated when the customPolicy - // field is not used. Note that specifying the same policy more than - // once for a backend is not a valid configuration and will be rejected. + // Name: The name of a locality load-balancing policy. Valid values + // include ROUND_ROBIN and, for Java clients, LEAST_REQUEST. For + // information about these values, see the description of + // localityLbPolicy. Do not specify the same policy more than once for a + // backend. If you do, the configuration is rejected. // // Possible values: // "INVALID_LB_POLICY" @@ -6613,6 +6779,16 @@ type BackendServiceLocalityLoadBalancingPolicyConfigPolicy struct { // of the requests. // "ROUND_ROBIN" - This is a simple policy in which each healthy // backend is selected in round robin order. This is the default. + // "WEIGHTED_MAGLEV" - Per-instance weighted Load Balancing via health + // check reported weights. If set, the Backend Service must configure a + // non legacy HTTP-based Health Check, and health check replies are + // expected to contain non-standard HTTP response header field + // X-Load-Balancing-Endpoint-Weight to specify the per-instance weights. + // If set, Load Balancing is weighted based on the per-instance weights + // reported in the last processed health check replies, as long as every + // instance either reported a valid weight or had UNAVAILABLE_WEIGHT. + // Otherwise, Load Balancing remains equal-weight. This option is only + // supported in Network Load Balancing. Name string `json:"name,omitempty"` // ForceSendFields is a list of field names (e.g. "Name") to @@ -6645,6 +6821,25 @@ type BackendServiceLogConfig struct { // traffic served by this backend service. The default value is false. Enable bool `json:"enable,omitempty"` + // OptionalFields: This field can only be specified if logging is + // enabled for this backend service and "logConfig.optionalMode" was set + // to CUSTOM. Contains a list of optional fields you want to include in + // the logs. For example: serverInstance, serverGkeDetails.cluster, + // serverGkeDetails.pod.podNamespace + OptionalFields []string `json:"optionalFields,omitempty"` + + // OptionalMode: This field can only be specified if logging is enabled + // for this backend service. Configures whether all, none or a subset of + // optional fields should be added to the reported logs. One of + // [INCLUDE_ALL_OPTIONAL, EXCLUDE_ALL_OPTIONAL, CUSTOM]. Default is + // EXCLUDE_ALL_OPTIONAL. + // + // Possible values: + // "CUSTOM" - A subset of optional fields. + // "EXCLUDE_ALL_OPTIONAL" - No optional fields. + // "INCLUDE_ALL_OPTIONAL" - All optional fields. + OptionalMode string `json:"optionalMode,omitempty"` + // SampleRate: This field can only be specified if logging is enabled // for this backend service. The value of the field must be in [0, 1].
// This configures the sampling rate of requests to the load balancer @@ -7162,7 +7357,9 @@ type Binding struct { // (https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). // For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. // * `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. * + // For example, `admins@example.com`. * `domain:{domain}`: The G Suite + // domain (primary) that represents all the users of that domain. For + // example, `google.com` or `example.com`. * // `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus // unique identifier) representing a user that has been recently // deleted. For example, `alice@example.com?uid=123456789012345678901`. @@ -7179,9 +7376,7 @@ type Binding struct { // that has been recently deleted. For example, // `admins@example.com?uid=123456789012345678901`. If the group is // recovered, this value reverts to `group:{emailid}` and the recovered - // group retains the role in the binding. * `domain:{domain}`: The G - // Suite domain (primary) that represents all the users of that domain. - // For example, `google.com` or `example.com`. + // group retains the role in the binding. Members []string `json:"members,omitempty"` // Role: Role that is assigned to the list of `members`, or principals. @@ -7556,7 +7751,7 @@ type Commitment struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // SplitSourceCommitment: Source commitment to be splitted into a new + // SplitSourceCommitment: Source commitment to be split into a new // commitment. SplitSourceCommitment string `json:"splitSourceCommitment,omitempty"` @@ -7570,7 +7765,8 @@ type Commitment struct { // // Possible values: // "ACTIVE" - // "CANCELLED" + // "CANCELLED" - Deprecate CANCELED status. Will use separate status + // to differentiate cancel by mergeCud or manual cancellation. // "CREATING" // "EXPIRED" // "NOT_YET_ACTIVE" @@ -7590,11 +7786,13 @@ type Commitment struct { // "ACCELERATOR_OPTIMIZED" // "COMPUTE_OPTIMIZED" // "COMPUTE_OPTIMIZED_C2D" + // "COMPUTE_OPTIMIZED_C3" // "GENERAL_PURPOSE" // "GENERAL_PURPOSE_E2" // "GENERAL_PURPOSE_N2" // "GENERAL_PURPOSE_N2D" // "GENERAL_PURPOSE_T2D" + // "GRAPHICS_OPTIMIZED" // "MEMORY_OPTIMIZED" // "MEMORY_OPTIMIZED_M3" // "TYPE_UNSPECIFIED" @@ -8642,6 +8840,13 @@ type Disk struct { // "X86_64" - Machines with architecture X86_64 Architecture string `json:"architecture,omitempty"` + // AsyncPrimaryDisk: Disk asynchronously replicated into this disk. + AsyncPrimaryDisk *DiskAsyncReplication `json:"asyncPrimaryDisk,omitempty"` + + // AsyncSecondaryDisks: [Output Only] A list of disks this disk is + // asynchronously replicated to. + AsyncSecondaryDisks map[string]DiskAsyncReplicationList `json:"asyncSecondaryDisks,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -8746,6 +8951,11 @@ type Disk struct { // see the Extreme persistent disk documentation. ProvisionedIops int64 `json:"provisionedIops,omitempty,string"` + // ProvisionedThroughput: Indicates how much throughput to provision for + // the disk. This sets the number of throughput mb per second that the + // disk can handle. Values must be between 1 and 7,124. + ProvisionedThroughput int64 `json:"provisionedThroughput,omitempty,string"` + // Region: [Output Only] URL of the region where the disk resides. 
Only // applicable for regional resources. You must specify this field as // part of the HTTP request URL. It is not settable as a field in the @@ -8760,6 +8970,10 @@ type Disk struct { // automatic snapshot creations. ResourcePolicies []string `json:"resourcePolicies,omitempty"` + // ResourceStatus: [Output Only] Status information for the disk + // resource. + ResourceStatus *DiskResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -8775,6 +8989,16 @@ type Disk struct { // source. Acceptable values are 1 to 65536, inclusive. SizeGb int64 `json:"sizeGb,omitempty,string"` + // SourceConsistencyGroupPolicy: [Output Only] URL of the + // DiskConsistencyGroupPolicy for a secondary disk that was created + // using a consistency group. + SourceConsistencyGroupPolicy string `json:"sourceConsistencyGroupPolicy,omitempty"` + + // SourceConsistencyGroupPolicyId: [Output Only] ID of the + // DiskConsistencyGroupPolicy for a secondary disk that was created + // using a consistency group. + SourceConsistencyGroupPolicyId string `json:"sourceConsistencyGroupPolicyId,omitempty"` + // SourceDisk: The source disk used to create this disk. You can provide // this as a partial or full URL to the resource. For example, the // following are valid values: - @@ -9099,6 +9323,86 @@ func (s *DiskAggregatedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type DiskAsyncReplication struct { + // ConsistencyGroupPolicy: [Output Only] URL of the + // DiskConsistencyGroupPolicy if replication was started on the disk as + // a member of a group. + ConsistencyGroupPolicy string `json:"consistencyGroupPolicy,omitempty"` + + // ConsistencyGroupPolicyId: [Output Only] ID of the + // DiskConsistencyGroupPolicy if replication was started on the disk as + // a member of a group. + ConsistencyGroupPolicyId string `json:"consistencyGroupPolicyId,omitempty"` + + // Disk: The other disk asynchronously replicated to or from the current + // disk. You can provide this as a partial or full URL to the resource. + // For example, the following are valid values: - + // https://www.googleapis.com/compute/v1/projects/project/zones/zone + // /disks/disk - projects/project/zones/zone/disks/disk - + // zones/zone/disks/disk + Disk string `json:"disk,omitempty"` + + // DiskId: [Output Only] The unique ID of the other disk asynchronously + // replicated to or from the current disk. This value identifies the + // exact disk that was used to create this replication. For example, if + // you started replicating the persistent disk from a disk that was + // later deleted and recreated under the same name, the disk ID would + // identify the exact version of the disk that was used. + DiskId string `json:"diskId,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "ConsistencyGroupPolicy") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ConsistencyGroupPolicy") + // to include in API requests with the JSON null value. 
By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DiskAsyncReplication) MarshalJSON() ([]byte, error) { + type NoMethod DiskAsyncReplication + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskAsyncReplicationList struct { + AsyncReplicationDisk *DiskAsyncReplication `json:"asyncReplicationDisk,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AsyncReplicationDisk") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AsyncReplicationDisk") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *DiskAsyncReplicationList) MarshalJSON() ([]byte, error) { + type NoMethod DiskAsyncReplicationList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskInstantiationConfig: A specification of the desired way to // instantiate a disk in the instance template when its created from a // source instance. @@ -9440,6 +9744,70 @@ func (s *DiskParams) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type DiskResourceStatus struct { + AsyncPrimaryDisk *DiskResourceStatusAsyncReplicationStatus `json:"asyncPrimaryDisk,omitempty"` + + // AsyncSecondaryDisks: Key: disk, value: AsyncReplicationStatus message + AsyncSecondaryDisks map[string]DiskResourceStatusAsyncReplicationStatus `json:"asyncSecondaryDisks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AsyncPrimaryDisk") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AsyncPrimaryDisk") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *DiskResourceStatus) MarshalJSON() ([]byte, error) { + type NoMethod DiskResourceStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type DiskResourceStatusAsyncReplicationStatus struct { + // Possible values: + // "ACTIVE" - Replication is active. + // "CREATED" - Secondary disk is created and is waiting for + // replication to start. + // "STARTING" - Replication is starting. + // "STATE_UNSPECIFIED" + // "STOPPED" - Replication is stopped. + // "STOPPING" - Replication is stopping. + State string `json:"state,omitempty"` + + // ForceSendFields is a list of field names (e.g. "State") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "State") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *DiskResourceStatusAsyncReplicationStatus) MarshalJSON() ([]byte, error) { + type NoMethod DiskResourceStatusAsyncReplicationStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // DiskType: Represents a Disk Type resource. Google Compute Engine has // two Disk Type resources: * Regional // (/compute/docs/reference/rest/v1/regionDiskTypes) * Zonal @@ -12073,8 +12441,7 @@ func (s *FirewallPolicyListWarningData) MarshalJSON() ([]byte, error) { // matches this condition (allow or deny). type FirewallPolicyRule struct { // Action: The Action to perform when the client connection triggers the - // rule. Can currently be either "allow" or "deny()" where valid values - // for status are 403, 404, and 502. + // rule. Valid actions are "allow", "deny" and "goto_next". Action string `json:"action,omitempty"` // Description: An optional description for this resource. @@ -12173,38 +12540,77 @@ func (s *FirewallPolicyRule) MarshalJSON() ([]byte, error) { // FirewallPolicyRuleMatcher: Represents a match condition that incoming // traffic is evaluated against. Exactly one field must be specified. type FirewallPolicyRuleMatcher struct { + // DestAddressGroups: Address groups which should be matched against the + // traffic destination. Maximum number of destination address groups is + // 10. + DestAddressGroups []string `json:"destAddressGroups,omitempty"` + + // DestFqdns: Fully Qualified Domain Name (FQDN) which should be matched + // against traffic destination. Maximum number of destination fqdn + // allowed is 100. + DestFqdns []string `json:"destFqdns,omitempty"` + // DestIpRanges: CIDR IP address range. Maximum number of destination // CIDR IP ranges allowed is 5000. DestIpRanges []string `json:"destIpRanges,omitempty"` + // DestRegionCodes: Region codes whose IP addresses will be used to + // match for destination of traffic. 
Should be specified as 2 letter + // country code defined as per ISO 3166 alpha-2 country codes. ex."US" + // Maximum number of dest region codes allowed is 5000. + DestRegionCodes []string `json:"destRegionCodes,omitempty"` + + // DestThreatIntelligences: Names of Network Threat Intelligence lists. + // The IPs in these lists will be matched against traffic destination. + DestThreatIntelligences []string `json:"destThreatIntelligences,omitempty"` + // Layer4Configs: Pairs of IP protocols and ports that the rule should // match. Layer4Configs []*FirewallPolicyRuleMatcherLayer4Config `json:"layer4Configs,omitempty"` + // SrcAddressGroups: Address groups which should be matched against the + // traffic source. Maximum number of source address groups is 10. + SrcAddressGroups []string `json:"srcAddressGroups,omitempty"` + + // SrcFqdns: Fully Qualified Domain Name (FQDN) which should be matched + // against traffic source. Maximum number of source fqdn allowed is 100. + SrcFqdns []string `json:"srcFqdns,omitempty"` + // SrcIpRanges: CIDR IP address range. Maximum number of source CIDR IP // ranges allowed is 5000. SrcIpRanges []string `json:"srcIpRanges,omitempty"` + // SrcRegionCodes: Region codes whose IP addresses will be used to match + // for source of traffic. Should be specified as 2 letter country code + // defined as per ISO 3166 alpha-2 country codes. ex."US" Maximum number + // of source region codes allowed is 5000. + SrcRegionCodes []string `json:"srcRegionCodes,omitempty"` + // SrcSecureTags: List of secure tag values, which should be matched at // the source of the traffic. For INGRESS rule, if all the srcSecureTag // are INEFFECTIVE, and there is no srcIpRange, this rule will be // ignored. Maximum number of source tag values allowed is 256. SrcSecureTags []*FirewallPolicyRuleSecureTag `json:"srcSecureTags,omitempty"` - // ForceSendFields is a list of field names (e.g. "DestIpRanges") to - // unconditionally include in API requests. By default, fields with + // SrcThreatIntelligences: Names of Network Threat Intelligence lists. + // The IPs in these lists will be matched against traffic source. + SrcThreatIntelligences []string `json:"srcThreatIntelligences,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestAddressGroups") + // to unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be // sent to the server regardless of whether the field is empty or not. // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "DestIpRanges") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. + // NullFields is a list of field names (e.g. "DestAddressGroups") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
NullFields []string `json:"-"` } @@ -12403,12 +12809,24 @@ type ForwardingRule struct { // clients in the same region as the internal load balancer. AllowGlobalAccess bool `json:"allowGlobalAccess,omitempty"` + // AllowPscGlobalAccess: This is used in PSC consumer ForwardingRule to + // control whether the PSC endpoint can be accessed from another region. + AllowPscGlobalAccess bool `json:"allowPscGlobalAccess,omitempty"` + // BackendService: Identifies the backend service to which the // forwarding rule sends traffic. Required for Internal TCP/UDP Load // Balancing and Network Load Balancing; must be omitted for all other // load balancer types. BackendService string `json:"backendService,omitempty"` + // BaseForwardingRule: [Output Only] The URL for the corresponding base + // Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule + // that has the same IP address, protocol, and port settings with the + // current Forwarding Rule, but without sourceIPRanges specified. Always + // empty if the current Forwarding Rule does not have sourceIPRanges + // specified. + BaseForwardingRule string `json:"baseForwardingRule,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -12513,9 +12931,10 @@ type ForwardingRule struct { // Network: This field is not used for external load balancing. For // Internal TCP/UDP Load Balancing, this field identifies the network // that the load balanced IP should belong to for this Forwarding Rule. - // If this field is not specified, the default network will be used. For - // Private Service Connect forwarding rules that forward traffic to - // Google APIs, a network must be provided. + // If the subnetwork is specified, the network of the subnetwork will be + // used. If neither subnetwork nor this field is specified, the default + // network will be used. For Private Service Connect forwarding rules + // that forward traffic to Google APIs, a network must be provided. Network string `json:"network,omitempty"` // NetworkTier: This signifies the networking tier used for configuring @@ -12621,6 +13040,15 @@ type ForwardingRule struct { // balancing. ServiceName string `json:"serviceName,omitempty"` + // SourceIpRanges: If not empty, this Forwarding Rule will only forward + // the traffic when the source IP address matches one of the IP + // addresses or CIDR ranges set here. Note that a Forwarding Rule can + // only have up to 64 source IP ranges, and this field can only be used + // with a regional Forwarding Rule whose scheme is EXTERNAL. Each + // source_ip_range entry should be either an IP address (for example, + // 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). + SourceIpRanges []string `json:"sourceIpRanges,omitempty"` + // Subnetwork: This field identifies the subnetwork that the load // balanced IP should belong to for this Forwarding Rule, used in // internal load balancing and network load balancing with IPv6. If the @@ -12633,13 +13061,15 @@ type ForwardingRule struct { // traffic. For regional forwarding rules, this target must be in the // same region as the forwarding rule. For global forwarding rules, this // target must be a global load balancing resource. The forwarded - // traffic must be of a type appropriate to the target object. For more - // information, see the "Target" column in Port specifications + // traffic must be of a type appropriate to the target object. 
- For + // load balancers, see the "Target" column in Port specifications // (https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). - // For Private Service Connect forwarding rules that forward traffic to - // Google APIs, provide the name of a supported Google API bundle: - + // - For Private Service Connect forwarding rules that forward traffic + // to Google APIs, provide the name of a supported Google API bundle: - // vpc-sc - APIs that support VPC Service Controls. - all-apis - All - // supported Google APIs. + // supported Google APIs. - For Private Service Connect forwarding rules + // that forward traffic to managed services, the target must be a + // service attachment. Target string `json:"target,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -13362,6 +13792,43 @@ func (s *GRPCHealthCheck) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type GlobalAddressesMoveRequest struct { + // Description: An optional destination address description if intended + // to be different from the source. + Description string `json:"description,omitempty"` + + // DestinationAddress: The URL of the destination address to move to. + // This can be a full or partial URL. For example, the following are all + // valid URLs to an address: - + // https://www.googleapis.com/compute/v1/projects/project + // /global/addresses/address - projects/project/global/addresses/address + // Note that the destination project must be different from the source + // project. So /global/addresses/address is not a valid partial URL. + DestinationAddress string `json:"destinationAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GlobalAddressesMoveRequest) MarshalJSON() ([]byte, error) { + type NoMethod GlobalAddressesMoveRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type GlobalNetworkEndpointGroupsAttachEndpointsRequest struct { // NetworkEndpoints: The list of network endpoints to be attached. NetworkEndpoints []*NetworkEndpoint `json:"networkEndpoints,omitempty"` @@ -13651,8 +14118,8 @@ type GuestOsFeature struct { // commas to separate values. Set to one or more of the following // values: - VIRTIO_SCSI_MULTIQUEUE - WINDOWS - MULTI_IP_SUBNET - // UEFI_COMPATIBLE - GVNIC - SEV_CAPABLE - SUSPEND_RESUME_COMPATIBLE - - // SEV_SNP_CAPABLE For more information, see Enabling guest operating - // system features. + // SEV_LIVE_MIGRATABLE - SEV_SNP_CAPABLE For more information, see + // Enabling guest operating system features.
// // Possible values: // "FEATURE_TYPE_UNSPECIFIED" @@ -13660,6 +14127,8 @@ type GuestOsFeature struct { // "MULTI_IP_SUBNET" // "SECURE_BOOT" // "SEV_CAPABLE" + // "SEV_LIVE_MIGRATABLE" + // "SEV_SNP_CAPABLE" // "UEFI_COMPATIBLE" // "VIRTIO_SCSI_MULTIQUEUE" // "WINDOWS" @@ -13976,12 +14445,12 @@ func (s *HTTPSHealthCheck) MarshalJSON() ([]byte, error) { // (/compute/docs/reference/rest/v1/regionHealthChecks) Internal HTTP(S) // load balancers must use regional health checks // (`compute.v1.regionHealthChecks`). Traffic Director must use global -// health checks (`compute.v1.HealthChecks`). Internal TCP/UDP load +// health checks (`compute.v1.healthChecks`). Internal TCP/UDP load // balancers can use either regional or global health checks -// (`compute.v1.regionHealthChecks` or `compute.v1.HealthChecks`). +// (`compute.v1.regionHealthChecks` or `compute.v1.healthChecks`). // External HTTP(S), TCP proxy, and SSL proxy load balancers as well as // managed instance group auto-healing must use global health checks -// (`compute.v1.HealthChecks`). Backend service-based network load +// (`compute.v1.healthChecks`). Backend service-based network load // balancers must use regional health checks // (`compute.v1.regionHealthChecks`). Target pool-based network load // balancers must use legacy HTTP health checks @@ -15059,7 +15528,7 @@ type HealthStatus struct { // instance. ForwardingRuleIp string `json:"forwardingRuleIp,omitempty"` - // HealthState: Health state of the instance. + // HealthState: Health state of the IPv4 address of the instance. // // Possible values: // "HEALTHY" @@ -15144,10 +15613,10 @@ type HealthStatusForNetworkEndpoint struct { // the health checks configured. // // Possible values: - // "DRAINING" - // "HEALTHY" - // "UNHEALTHY" - // "UNKNOWN" + // "DRAINING" - Endpoint is being drained. + // "HEALTHY" - Endpoint is healthy. + // "UNHEALTHY" - Endpoint is unhealthy. + // "UNKNOWN" - Health status of the endpoint is unknown. HealthState string `json:"healthState,omitempty"` // ForceSendFields is a list of field names (e.g. "BackendService") to @@ -15643,6 +16112,7 @@ type HttpHealthCheck struct { // RequestPath: The request path of the HTTP health check request. The // default value is /. This field does not support query parameters. + // Must comply with RFC3986. RequestPath string `json:"requestPath,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -16299,6 +16769,15 @@ type HttpRouteRuleMatch struct { // validateForProxyless field set to true. MetadataFilters []*MetadataFilter `json:"metadataFilters,omitempty"` + // PathTemplateMatch: If specified, the route is a pattern match + // expression that must match the :path header once the query string is + // removed. A pattern match allows you to match - The value must be + // between 1 and 1024 characters - The pattern must start with a leading + // slash ("/") - There may be no more than 5 operators in pattern + // Precisely one of prefix_match, full_path_match, regex_match or + // path_template_match must be set. + PathTemplateMatch string `json:"pathTemplateMatch,omitempty"` + // PrefixMatch: For satisfying the matchRule condition, the request's // path must begin with the specified prefixMatch. prefixMatch must // begin with a /. The value must be from 1 to 1024 characters. Only one @@ -16393,7 +16872,7 @@ type HttpsHealthCheck struct { Port int64 `json:"port,omitempty"` // RequestPath: The request path of the HTTPS health check request. The - // default value is "/". 
+ // default value is "/". Must comply with RFC3986. RequestPath string `json:"requestPath,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -16658,11 +17137,14 @@ type Image struct { // (in GB). DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` - // Family: The name of the image family to which this image belongs. You - // can create disks by specifying an image family instead of a specific - // image name. The image family always returns its latest image that is - // not deprecated. The name of the image family must comply with - // RFC1035. + // Family: The name of the image family to which this image belongs. The + // image family name can be from a publicly managed image family + // provided by Compute Engine, or from a custom image family you create. + // For example, centos-stream-9 is a publicly available image family. + // For more information, see Image family best practices. When creating + // disks, you can specify an image family instead of a specific image + // name. The image family always returns its latest image that is not + // deprecated. The name of the image family must comply with RFC1035. Family string `json:"family,omitempty"` // GuestOsFeatures: A list of features to enable on the guest operating @@ -17366,9 +17848,9 @@ type Instance struct { // cycle. // // Possible values: - // "DEPROVISIONING" - The Nanny is halted and we are performing tear - // down tasks like network deprogramming, releasing quota, IP, tearing - // down disks etc. + // "DEPROVISIONING" - The instance is halted and we are performing + // tear down tasks like network deprogramming, releasing quota, IP, + // tearing down disks etc. // "PROVISIONING" - Resources are being allocated for the instance. // "REPAIRING" - The instance is in repair. // "RUNNING" - The instance is running. @@ -18621,13 +19103,14 @@ type InstanceGroupManagerAutoHealingPolicy struct { // HealthCheck: The URL for the health check that signals autohealing. HealthCheck string `json:"healthCheck,omitempty"` - // InitialDelaySec: The number of seconds that the managed instance - // group waits before it applies autohealing policies to new instances - // or recently recreated instances. This initial delay allows instances - // to initialize and run their startup scripts before the instance group - // determines that they are UNHEALTHY. This prevents the managed - // instance group from recreating its instances prematurely. This value - // must be from range [0, 3600]. + // InitialDelaySec: The initial delay is the number of seconds that a + // new VM takes to initialize and run its startup script. During a VM's + // initial delay period, the MIG ignores unsuccessful health checks + // because the VM might be in the startup process. This prevents the MIG + // from prematurely recreating a VM. If the health check receives a + // healthy response during the initial delay, it indicates that the + // startup process is complete and the VM is ready. The value of initial + // delay must be between 0 and 3600 seconds. The default value is 0. InitialDelaySec int64 `json:"initialDelaySec,omitempty"` // ForceSendFields is a list of field names (e.g. "HealthCheck") to @@ -19298,7 +19781,9 @@ func (s *InstanceGroupManagersCreateInstancesRequest) MarshalJSON() ([]byte, err type InstanceGroupManagersDeleteInstancesRequest struct { // Instances: The URLs of one or more instances to delete. This can be a // full URL or a partial URL, such as - // zones/[ZONE]/instances/[INSTANCE_NAME]. 
+ // zones/[ZONE]/instances/[INSTANCE_NAME]. Queued instances do not have + // URL and can be deleted only by name. One cannot specify both URLs and + // names in a single request. Instances []string `json:"instances,omitempty"` // SkipInstancesOnValidationError: Specifies whether the request should @@ -21248,6 +21733,10 @@ type InstanceTemplate struct { // Properties: The instance properties for this instance template. Properties *InstanceProperties `json:"properties,omitempty"` + // Region: [Output Only] URL of the region where the instance template + // resides. Only applicable for regional resources. + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] The URL for this instance template. The // server defines this URL. SelfLink string `json:"selfLink,omitempty"` @@ -21291,17 +21780,17 @@ func (s *InstanceTemplate) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InstanceTemplateList: A list of instance templates. -type InstanceTemplateList struct { +// InstanceTemplateAggregatedList: Contains a list of +// InstanceTemplatesScopedList. +type InstanceTemplateAggregatedList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of InstanceTemplate resources. - Items []*InstanceTemplate `json:"items,omitempty"` + // Items: A list of InstanceTemplatesScopedList resources. + Items map[string]InstanceTemplatesScopedList `json:"items,omitempty"` - // Kind: [Output Only] The resource type, which is always - // compute#instanceTemplatesListResponse for instance template lists. + // Kind: Type of resource. Kind string `json:"kind,omitempty"` // NextPageToken: [Output Only] This token allows you to get the next @@ -21316,7 +21805,197 @@ type InstanceTemplateList struct { SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *InstanceTemplateListWarning `json:"warning,omitempty"` + Warning *InstanceTemplateAggregatedListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplateAggregatedList) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplateAggregatedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceTemplateAggregatedListWarning: [Output Only] Informational +// warning message. 
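// A hedged sketch (not part of the generated file) of walking the aggregated
// shape defined above, where Items maps a scope key to an
// InstanceTemplatesScopedList value. Assumes "fmt" is imported; the function
// name is illustrative.
func printAggregatedTemplates(list *InstanceTemplateAggregatedList) {
	for scope, scoped := range list.Items { // scope looks like "global" or "regions/us-central1"
		for _, tmpl := range scoped.InstanceTemplates {
			fmt.Printf("%s: %s\n", scope, tmpl.Name)
		}
		// An empty scope carries an informational warning instead of items.
		if scoped.Warning != nil {
			fmt.Printf("%s: %s\n", scope, scoped.Warning.Message)
		}
	}
}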
+type InstanceTemplateAggregatedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*InstanceTemplateAggregatedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplateAggregatedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplateAggregatedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceTemplateAggregatedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplateAggregatedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplateAggregatedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceTemplateList: A list of instance templates. +type InstanceTemplateList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. 
+ Id string `json:"id,omitempty"` + + // Items: A list of InstanceTemplate resources. + Items []*InstanceTemplate `json:"items,omitempty"` + + // Kind: [Output Only] The resource type, which is always + // compute#instanceTemplatesListResponse for instance template lists. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *InstanceTemplateListWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -21481,6 +22160,176 @@ func (s *InstanceTemplateListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstanceTemplatesScopedList struct { + // InstanceTemplates: [Output Only] A list of instance templates that + // are contained within the specified project and zone. + InstanceTemplates []*InstanceTemplate `json:"instanceTemplates,omitempty"` + + // Warning: [Output Only] An informational warning that replaces the + // list of instance templates when the list is empty. + Warning *InstanceTemplatesScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InstanceTemplates") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InstanceTemplates") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplatesScopedList) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplatesScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InstanceTemplatesScopedListWarning: [Output Only] An informational +// warning that replaces the list of instance templates when the list is +// empty. +type InstanceTemplatesScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. 
+ // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*InstanceTemplatesScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. 
However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplatesScopedListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplatesScopedListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InstanceTemplatesScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstanceTemplatesScopedListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InstanceTemplatesScopedListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstanceWithNamedPorts struct { // Instance: [Output Only] The URL of the instance. Instance string `json:"instance,omitempty"` @@ -21492,9 +22341,9 @@ type InstanceWithNamedPorts struct { // Status: [Output Only] The status of the instance. // // Possible values: - // "DEPROVISIONING" - The Nanny is halted and we are performing tear - // down tasks like network deprogramming, releasing quota, IP, tearing - // down disks etc. + // "DEPROVISIONING" - The instance is halted and we are performing + // tear down tasks like network deprogramming, releasing quota, IP, + // tearing down disks etc. // "PROVISIONING" - Resources are being allocated for the instance. // "REPAIRING" - The instance is in repair. 
// "RUNNING" - The instance is running. @@ -21956,6 +22805,39 @@ func (s *InstancesSetMinCpuPlatformRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type InstancesSetNameRequest struct { + // CurrentName: The current name of this resource, used to prevent + // conflicts. Provide the latest name when making a request to change + // name. + CurrentName string `json:"currentName,omitempty"` + + // Name: The name to be applied to the instance. Needs to be RFC 1035 + // compliant. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CurrentName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CurrentName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InstancesSetNameRequest) MarshalJSON() ([]byte, error) { + type NoMethod InstancesSetNameRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type InstancesSetServiceAccountRequest struct { // Email: Email address of the service account. Email string `json:"email,omitempty"` @@ -22053,9 +22935,9 @@ func (s *Int64RangeMatch) MarshalJSON() ([]byte, error) { } // Interconnect: Represents an Interconnect resource. An Interconnect -// resource is a dedicated connection between the GCP network and your -// on-premises network. For more information, read the Dedicated -// Interconnect Overview. +// resource is a dedicated connection between the Google Cloud network +// and your on-premises network. For more information, read the +// Dedicated Interconnect Overview. type Interconnect struct { // AdminEnabled: Administrative status of the interconnect. When this is // set to true, the Interconnect is functional and can carry traffic. @@ -22120,6 +23002,21 @@ type Interconnect struct { // for interconnects. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // Interconnect, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. To see the latest fingerprint, make a get() + // request to retrieve an Interconnect. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. 
+ Labels map[string]string `json:"labels,omitempty"` + // LinkType: Type of link requested, which can take one of the following // values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - // LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note that @@ -22148,8 +23045,9 @@ type Interconnect struct { // NocContactEmail: Email address to contact the customer NOC for // operations and maintenance notifications regarding this Interconnect. // If specified, this will be used for notifications in addition to all - // other forms described, such as Stackdriver logs alerting and Cloud - // Notifications. + // other forms described, such as Cloud Monitoring logs alerting and + // Cloud Notifications. This field is required for users who sign up for + // Cloud Interconnect using workforce identity federation. NocContactEmail string `json:"nocContactEmail,omitempty"` // OperationalStatus: [Output Only] The current status of this @@ -22179,13 +23077,16 @@ type Interconnect struct { // provisioned in this interconnect. ProvisionedLinkCount int64 `json:"provisionedLinkCount,omitempty"` + // RemoteLocation: Indicates that this is a Cross-Cloud Interconnect. + // This field specifies the location outside of Google's network that + // the interconnect is connected to. + RemoteLocation string `json:"remoteLocation,omitempty"` + // RequestedLinkCount: Target number of physical links in the link // bundle, as requested by the customer. RequestedLinkCount int64 `json:"requestedLinkCount,omitempty"` - // SatisfiesPzs: [Output Only] Set to true if the resource satisfies the - // zone separation organization policy constraints and false otherwise. - // Defaults to false if the field is not present. + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -22296,6 +23197,11 @@ type InterconnectAttachment struct { // CloudRouterIpv6InterfaceId: This field is not available. CloudRouterIpv6InterfaceId string `json:"cloudRouterIpv6InterfaceId,omitempty"` + // ConfigurationConstraints: [Output Only] Constraints for this + // attachment, if any. The attachment does not work if these constraints + // are not met. + ConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"configurationConstraints,omitempty"` + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` @@ -22388,14 +23294,28 @@ type InterconnectAttachment struct { // attachment. If this field is not specified when creating the VLAN // attachment, then later on when creating an HA VPN gateway on this // VLAN attachment, the HA VPN gateway's IP address is allocated from - // the regional external IP address pool. Not currently available - // publicly. + // the regional external IP address pool. IpsecInternalAddresses []string `json:"ipsecInternalAddresses,omitempty"` // Kind: [Output Only] Type of the resource. Always // compute#interconnectAttachment for interconnect attachments. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // InterconnectAttachment, which is essentially a hash of the labels set + // used for optimistic locking. The fingerprint is initially generated + // by Compute Engine and changes after every request to modify or update + // labels. 
You must always provide an up-to-date fingerprint hash in + // order to update or change labels, otherwise the request will fail + // with error 412 conditionNotMet. To see the latest fingerprint, make a + // get() request to retrieve an InterconnectAttachment. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // Mtu: Maximum Transmission Unit (MTU), in bytes, of packets passing // through this interconnect attachment. Only 1440 and 1500 are allowed. // If not specified, the value will default to 1440. @@ -22451,15 +23371,21 @@ type InterconnectAttachment struct { // body. Region string `json:"region,omitempty"` + // RemoteService: [Output Only] If the attachment is on a Cross-Cloud + // Interconnect connection, this field contains the interconnect's + // remote location service provider. Example values: "Amazon Web + // Services" "Microsoft Azure". The field is set only for attachments on + // Cross-Cloud Interconnect connections. Its value is copied from the + // InterconnectRemoteLocation remoteService field. + RemoteService string `json:"remoteService,omitempty"` + // Router: URL of the Cloud Router to be used for dynamic routing. This // router must be in the same region as this InterconnectAttachment. The // InterconnectAttachment will automatically connect the Interconnect to // the network & region within which the Cloud Router is configured. Router string `json:"router,omitempty"` - // SatisfiesPzs: [Output Only] Set to true if the resource satisfies the - // zone separation organization policy constraints and false otherwise. - // Defaults to false if the field is not present. + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -22516,6 +23442,16 @@ type InterconnectAttachment struct { // yet, because turnup is not complete. State string `json:"state,omitempty"` + // SubnetLength: Length of the IPv4 subnet mask. Allowed values: - 29 + // (default) - 30 The default value is 29, except for Cross-Cloud + // Interconnect connections that use an InterconnectRemoteLocation with + // a constraints.subnetLengthRange.min equal to 30. For example, + // connections that use an Azure remote location fall into this + // category. In these cases, the default value is 30, and requesting 29 + // returns an error. Where both 29 and 30 are allowed, 29 is preferred, + // because it gives Google Cloud Support more debugging visibility. + SubnetLength int64 `json:"subnetLength,omitempty"` + // Type: The type of interconnect attachment this is, which can take one // of the following values: - DEDICATED: an attachment to a Dedicated // Interconnect. - PARTNER: an attachment to a Partner Interconnect, @@ -22531,7 +23467,7 @@ type InterconnectAttachment struct { Type string `json:"type,omitempty"` // VlanTag8021q: The IEEE 802.1Q VLAN tag for this attachment, in the - // range 2-4094. Only specified at creation time. + // range 2-4093. Only specified at creation time. 
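// Illustrating the SubnetLength rule described above: 29 is the default and
// preferred value, but some Cross-Cloud remote locations (for example, Azure,
// whose constraints.subnetLengthRange.min is 30) require 30 and reject 29.
// A hedged sketch; the attachment name is hypothetical.
func exampleAttachment(remoteRequires30 bool) *InterconnectAttachment {
	att := &InterconnectAttachment{
		Name:         "example-attachment",
		SubnetLength: 29, // preferred where allowed: more debugging visibility
	}
	if remoteRequires30 {
		att.SubnetLength = 30 // requesting 29 here would return an error
	}
	return att
}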
VlanTag8021q int64 `json:"vlanTag8021q,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -22754,139 +23690,37 @@ func (s *InterconnectAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectAttachmentList: Response to the list request, and -// contains a list of interconnect attachments. -type InterconnectAttachmentList struct { - // Id: [Output Only] Unique identifier for the resource; defined by the - // server. - Id string `json:"id,omitempty"` - - // Items: A list of InterconnectAttachment resources. - Items []*InterconnectAttachment `json:"items,omitempty"` - - // Kind: [Output Only] Type of resource. Always - // compute#interconnectAttachmentList for lists of interconnect - // attachments. - Kind string `json:"kind,omitempty"` - - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. - NextPageToken string `json:"nextPageToken,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for this resource. - SelfLink string `json:"selfLink,omitempty"` - - // Warning: [Output Only] Informational warning message. - Warning *InterconnectAttachmentListWarning `json:"warning,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` - - // ForceSendFields is a list of field names (e.g. "Id") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Id") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *InterconnectAttachmentList) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentList - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -// InterconnectAttachmentListWarning: [Output Only] Informational -// warning message. -type InterconnectAttachmentListWarning struct { - // Code: [Output Only] A warning code, if applicable. For example, - // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in - // the response. +type InterconnectAttachmentConfigurationConstraints struct { + // BgpMd5: [Output Only] Whether the attachment's BGP session + // requires/allows/disallows BGP MD5 authentication. This can take one + // of the following values: MD5_OPTIONAL, MD5_REQUIRED, MD5_UNSUPPORTED. 
+ // For example, a Cross-Cloud Interconnect connection to a remote cloud + // provider that requires BGP MD5 authentication has the + // interconnectRemoteLocation + // attachment_configuration_constraints.bgp_md5 field set to + // MD5_REQUIRED, and that property is propagated to the attachment. + // Similarly, if BGP MD5 is MD5_UNSUPPORTED, an error is returned if MD5 + // is requested. // // Possible values: - // "CLEANUP_FAILED" - Warning about failed cleanup of transient - // changes made by a failed operation. - // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was - // created. - // "DEPRECATED_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as deprecated - // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk - // that is larger than image size. - // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the - // resources has a type marked as experimental - // "EXTERNAL_API_WARNING" - Warning that is present in an external api - // call - // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been - // overridden. Deprecated unused field. - // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an - // injected kernel, which is deprecated. - // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV - // backend service is associated with a health check that is not of type - // HTTP/HTTPS/HTTP2. - // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a - // exceedingly large number of resources - // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type - // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is - // not assigned to an instance on the network. - // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot - // ip forward. - // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's - // nextHopInstance URL refers to an instance that does not have an ipv6 - // interface on the same network as the route. - // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL - // refers to an instance that does not exist. - // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance - // URL refers to an instance that is not on the same network as the - // route. - // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not - // have a status of RUNNING. - // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to - // continue the process despite the mentioned error. - // "NO_RESULTS_ON_PAGE" - No results are present on a particular list - // page. - // "PARTIAL_SUCCESS" - Success is reported, but some results may be - // missing due to errors - // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource - // that requires a TOS they have not accepted. - // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a - // resource is in use. - // "RESOURCE_NOT_DELETED" - One or more of the resources set to - // auto-delete could not be deleted because they were in use. - // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is - // ignored. - // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in - // instance group manager is valid as such, but its application does not - // make a lot of sense, because it allows only single instance in - // instance group. - // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema - // are present - // "UNREACHABLE" - A given scope cannot be reached. 
- Code string `json:"code,omitempty"` - - // Data: [Output Only] Metadata about this warning in key: value format. - // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" - // } - Data []*InterconnectAttachmentListWarningData `json:"data,omitempty"` + // "MD5_OPTIONAL" - MD5_OPTIONAL: BGP MD5 authentication is supported + // and can optionally be configured. + // "MD5_REQUIRED" - MD5_REQUIRED: BGP MD5 authentication must be + // configured. + // "MD5_UNSUPPORTED" - MD5_UNSUPPORTED: BGP MD5 authentication must + // not be configured + BgpMd5 string `json:"bgpMd5,omitempty"` - // Message: [Output Only] A human-readable description of the warning - // code. - Message string `json:"message,omitempty"` + // BgpPeerAsnRanges: [Output Only] List of ASN ranges that the remote + // location is known to support. Formatted as an array of inclusive + // ranges {min: min-value, max: max-value}. For example, [{min: 123, + // max: 123}, {min: 64512, max: 65534}] allows the peer ASN to be 123 or + // anything in the range 64512-65534. This field is only advisory. + // Although the API accepts other ranges, these are the ranges that we + // recommend. + BgpPeerAsnRanges []*InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange `json:"bgpPeerAsnRanges,omitempty"` - // ForceSendFields is a list of field names (e.g. "Code") to + // ForceSendFields is a list of field names (e.g. "BgpMd5") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -22894,7 +23728,7 @@ type InterconnectAttachmentListWarning struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Code") to include in API + // NullFields is a list of field names (e.g. "BgpMd5") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -22903,27 +23737,18 @@ type InterconnectAttachmentListWarning struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentListWarning +func (s *InterconnectAttachmentConfigurationConstraints) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentConfigurationConstraints raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type InterconnectAttachmentListWarningData struct { - // Key: [Output Only] A key that provides more detail on the warning - // being returned. For example, for warnings where there are no results - // in a list request for a particular zone, this key might be scope and - // the key value might be the zone name. Other examples might be a key - // indicating a deprecated resource and a suggested replacement, or a - // warning about invalid network settings (for example, if an instance - // attempts to perform IP forwarding but is not enabled for IP - // forwarding). - Key string `json:"key,omitempty"` +type InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange struct { + Max int64 `json:"max,omitempty"` - // Value: [Output Only] A warning data value corresponding to the key. - Value string `json:"value,omitempty"` + Min int64 `json:"min,omitempty"` - // ForceSendFields is a list of field names (e.g. 
"Key") to + // ForceSendFields is a list of field names (e.g. "Max") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -22931,7 +23756,7 @@ type InterconnectAttachmentListWarningData struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Key") to include in API + // NullFields is a list of field names (e.g. "Max") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -22940,67 +23765,46 @@ type InterconnectAttachmentListWarningData struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentListWarningData +func (s *InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentConfigurationConstraintsBgpPeerASNRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectAttachmentPartnerMetadata: Informational metadata about -// Partner attachments from Partners to display to customers. These -// fields are propagated from PARTNER_PROVIDER attachments to their -// corresponding PARTNER attachments. -type InterconnectAttachmentPartnerMetadata struct { - // InterconnectName: Plain text name of the Interconnect this attachment - // is connected to, as displayed in the Partner's portal. For instance - // "Chicago 1". This value may be validated to match approved Partner - // values. - InterconnectName string `json:"interconnectName,omitempty"` +// InterconnectAttachmentList: Response to the list request, and +// contains a list of interconnect attachments. +type InterconnectAttachmentList struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` - // PartnerName: Plain text name of the Partner providing this - // attachment. This value may be validated to match approved Partner - // values. - PartnerName string `json:"partnerName,omitempty"` + // Items: A list of InterconnectAttachment resources. + Items []*InterconnectAttachment `json:"items,omitempty"` - // PortalUrl: URL of the Partner's portal for this Attachment. Partners - // may customise this to be a deep link to the specific resource on the - // Partner portal. This value may be validated to match approved Partner - // values. - PortalUrl string `json:"portalUrl,omitempty"` + // Kind: [Output Only] Type of resource. Always + // compute#interconnectAttachmentList for lists of interconnect + // attachments. + Kind string `json:"kind,omitempty"` - // ForceSendFields is a list of field names (e.g. "InterconnectName") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` - // NullFields is a list of field names (e.g. "InterconnectName") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` -func (s *InterconnectAttachmentPartnerMetadata) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentPartnerMetadata - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Warning: [Output Only] Informational warning message. + Warning *InterconnectAttachmentListWarning `json:"warning,omitempty"` -// InterconnectAttachmentPrivateInfo: Information for an interconnect -// attachment when this belongs to an interconnect of type DEDICATED. -type InterconnectAttachmentPrivateInfo struct { - // Tag8021q: [Output Only] 802.1q encapsulation tag to be used for - // traffic between Google and the customer, going to and from this - // network and region. - Tag8021q int64 `json:"tag8021q,omitempty"` + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Tag8021q") to + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -23008,8 +23812,8 @@ type InterconnectAttachmentPrivateInfo struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Tag8021q") to include in - // API requests with the JSON null value. By default, fields with empty + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. @@ -23017,49 +23821,262 @@ type InterconnectAttachmentPrivateInfo struct { NullFields []string `json:"-"` } -func (s *InterconnectAttachmentPrivateInfo) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentPrivateInfo - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type InterconnectAttachmentsScopedList struct { - // InterconnectAttachments: A list of interconnect attachments contained - // in this scope. - InterconnectAttachments []*InterconnectAttachment `json:"interconnectAttachments,omitempty"` - - // Warning: Informational warning which replaces the list of addresses - // when the list is empty. 
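// The NextPageToken contract above follows the standard list-paging loop. A
// hedged sketch, assuming the usual generated List call on the
// interconnectAttachments collection; the project and region are hypothetical.
func listAllAttachments(svc *Service) ([]*InterconnectAttachment, error) {
	var all []*InterconnectAttachment
	call := svc.InterconnectAttachments.List("my-project", "us-central1")
	for {
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
		if page.NextPageToken == "" {
			return all, nil
		}
		call.PageToken(page.NextPageToken)
	}
}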
- Warning *InterconnectAttachmentsScopedListWarning `json:"warning,omitempty"` - - // ForceSendFields is a list of field names (e.g. - // "InterconnectAttachments") to unconditionally include in API - // requests. By default, fields with empty or default values are omitted - // from API requests. However, any non-pointer, non-interface field - // appearing in ForceSendFields will be sent to the server regardless of - // whether the field is empty or not. This may be used to include empty - // fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "InterconnectAttachments") - // to include in API requests with the JSON null value. By default, - // fields with empty values are omitted from API requests. However, any - // field with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *InterconnectAttachmentsScopedList) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectAttachmentsScopedList +func (s *InterconnectAttachmentList) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentList raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectAttachmentsScopedListWarning: Informational warning which -// replaces the list of addresses when the list is empty. -type InterconnectAttachmentsScopedListWarning struct { +// InterconnectAttachmentListWarning: [Output Only] Informational +// warning message. +type InterconnectAttachmentListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. + // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. 
+ // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*InterconnectAttachmentListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
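// Sketch: surfacing the informational warning attached to a list
// response, using the Code/Message/Data layout defined above. `page`
// stands for any generated list type with a Warning field; the logging
// is illustrative. Assumes imports of "log" and
// compute "google.golang.org/api/compute/v1".
func logListWarning(page *compute.InterconnectAttachmentList) {
	w := page.Warning
	if w == nil {
		return
	}
	log.Printf("list warning %s: %s", w.Code, w.Message)
	for _, d := range w.Data {
		// e.g. key "scope", value "zones/us-east1-d"
		log.Printf("  %s = %s", d.Key, d.Value)
	}
}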
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentPartnerMetadata: Informational metadata about +// Partner attachments from Partners to display to customers. These +// fields are propagated from PARTNER_PROVIDER attachments to their +// corresponding PARTNER attachments. +type InterconnectAttachmentPartnerMetadata struct { + // InterconnectName: Plain text name of the Interconnect this attachment + // is connected to, as displayed in the Partner's portal. For instance + // "Chicago 1". This value may be validated to match approved Partner + // values. + InterconnectName string `json:"interconnectName,omitempty"` + + // PartnerName: Plain text name of the Partner providing this + // attachment. This value may be validated to match approved Partner + // values. + PartnerName string `json:"partnerName,omitempty"` + + // PortalUrl: URL of the Partner's portal for this Attachment. Partners + // may customise this to be a deep link to the specific resource on the + // Partner portal. This value may be validated to match approved Partner + // values. + PortalUrl string `json:"portalUrl,omitempty"` + + // ForceSendFields is a list of field names (e.g. "InterconnectName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InterconnectName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentPartnerMetadata) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentPartnerMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentPrivateInfo: Information for an interconnect +// attachment when this belongs to an interconnect of type DEDICATED. +type InterconnectAttachmentPrivateInfo struct { + // Tag8021q: [Output Only] 802.1q encapsulation tag to be used for + // traffic between Google and the customer, going to and from this + // network and region. + Tag8021q int64 `json:"tag8021q,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Tag8021q") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Tag8021q") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentPrivateInfo) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentPrivateInfo + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectAttachmentsScopedList struct { + // InterconnectAttachments: A list of interconnect attachments contained + // in this scope. + InterconnectAttachments []*InterconnectAttachment `json:"interconnectAttachments,omitempty"` + + // Warning: Informational warning which replaces the list of addresses + // when the list is empty. + Warning *InterconnectAttachmentsScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InterconnectAttachments") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InterconnectAttachments") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
+ NullFields []string `json:"-"` +} + +func (s *InterconnectAttachmentsScopedList) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectAttachmentsScopedList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectAttachmentsScopedListWarning: Informational warning which +// replaces the list of addresses when the list is empty. +type InterconnectAttachmentsScopedListWarning struct { // Code: [Output Only] A warning code, if applicable. For example, // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in // the response. @@ -23772,9 +24789,7 @@ type InterconnectLocation struct { // Interconnects. Status string `json:"status,omitempty"` - // SupportsPzs: [Output Only] Set to true for locations that support - // physical zone separation. Defaults to false if the field is not - // present. + // SupportsPzs: [Output Only] Reserved for future use. SupportsPzs bool `json:"supportsPzs,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -24144,86 +25159,123 @@ func (s *InterconnectOutageNotification) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// InterconnectsGetDiagnosticsResponse: Response for the -// InterconnectsGetDiagnosticsRequest. -type InterconnectsGetDiagnosticsResponse struct { - Result *InterconnectDiagnostics `json:"result,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` +// InterconnectRemoteLocation: Represents a Cross-Cloud Interconnect +// Remote Location resource. You can use this resource to find remote +// location details about an Interconnect attachment (VLAN). +type InterconnectRemoteLocation struct { + // Address: [Output Only] The postal address of the Point of Presence, + // each line in the address is separated by a newline character. + Address string `json:"address,omitempty"` - // ForceSendFields is a list of field names (e.g. "Result") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` + // AttachmentConfigurationConstraints: [Output Only] Subset of fields + // from InterconnectAttachment's |configurationConstraints| field that + // apply to all attachments for this remote location. + AttachmentConfigurationConstraints *InterconnectAttachmentConfigurationConstraints `json:"attachmentConfigurationConstraints,omitempty"` - // NullFields is a list of field names (e.g. "Result") to include in API - // requests with the JSON null value. By default, fields with empty - // values are omitted from API requests. However, any field with an - // empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} + // City: [Output Only] Metropolitan area designator that indicates which + // city an interconnect is located. For example: "Chicago, IL", + // "Amsterdam, Netherlands". 
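// Sketch: reading a single InterconnectRemoteLocation. Assumes the
// companion InterconnectRemoteLocations.Get call generated alongside
// this resource; the project and location names are placeholders.
// Assumes imports of "context", "fmt", and
// compute "google.golang.org/api/compute/v1".
func describeRemoteLocation(ctx context.Context, svc *compute.Service) error {
	loc, err := svc.InterconnectRemoteLocations.Get("my-project", "my-remote-location").Context(ctx).Do()
	if err != nil {
		return err
	}
	fmt.Printf("%s (%s): %s, continent %s\n",
		loc.Name, loc.RemoteService, loc.City, loc.Continent)
	return nil
}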
+ City string `json:"city,omitempty"` -func (s *InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { - type NoMethod InterconnectsGetDiagnosticsResponse - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} + // Constraints: [Output Only] Constraints on the parameters for creating + // Cross-Cloud Interconnect and associated InterconnectAttachments. + Constraints *InterconnectRemoteLocationConstraints `json:"constraints,omitempty"` -// License: Represents a License resource. A License represents billing -// and aggregate usage data for public and marketplace images. *Caution* -// This resource is intended for use only by third-party partners who -// are creating Cloud Marketplace images. -type License struct { - // ChargesUseFee: [Output Only] Deprecated. This field no longer - // reflects whether a license charges a usage fee. - ChargesUseFee bool `json:"chargesUseFee,omitempty"` + // Continent: [Output Only] Continent for this location, which can take + // one of the following values: - AFRICA - ASIA_PAC - EUROPE - + // NORTH_AMERICA - SOUTH_AMERICA + // + // Possible values: + // "AFRICA" + // "ASIA_PAC" + // "EUROPE" + // "NORTH_AMERICA" + // "SOUTH_AMERICA" + Continent string `json:"continent,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` - // Description: An optional textual description of the resource; - // provided by the client when the resource is created. + // Description: [Output Only] An optional description of the resource. Description string `json:"description,omitempty"` + // FacilityProvider: [Output Only] The name of the provider for this + // facility (e.g., EQUINIX). + FacilityProvider string `json:"facilityProvider,omitempty"` + + // FacilityProviderFacilityId: [Output Only] A provider-assigned + // Identifier for this facility (e.g., Ashburn-DC1). + FacilityProviderFacilityId string `json:"facilityProviderFacilityId,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` - // Kind: [Output Only] Type of resource. Always compute#license for - // licenses. + // Kind: [Output Only] Type of the resource. Always + // compute#interconnectRemoteLocation for interconnect remote locations. Kind string `json:"kind,omitempty"` - // LicenseCode: [Output Only] The unique code used to attach this - // license to images, snapshots, and disks. - LicenseCode uint64 `json:"licenseCode,omitempty,string"` + // Lacp: [Output Only] Link Aggregation Control Protocol (LACP) + // constraints, which can take one of the following values: + // LACP_SUPPORTED, LACP_UNSUPPORTED + // + // Possible values: + // "LACP_SUPPORTED" - LACP_SUPPORTED: LACP is supported, and enabled + // by default on the Cross-Cloud Interconnect. + // "LACP_UNSUPPORTED" - LACP_UNSUPPORTED: LACP is not supported and is + // not be enabled on this port. GetDiagnostics shows + // bundleAggregationType as "static". GCP does not support LAGs without + // LACP, so requestedLinkCount must be 1. + Lacp string `json:"lacp,omitempty"` + + // MaxLagSize100Gbps: [Output Only] The maximum number of 100 Gbps ports + // supported in a link aggregation group (LAG). When linkType is 100 + // Gbps, requestedLinkCount cannot exceed max_lag_size_100_gbps. 
+ MaxLagSize100Gbps int64 `json:"maxLagSize100Gbps,omitempty"` + + // MaxLagSize10Gbps: [Output Only] The maximum number of 10 Gbps ports + // supported in a link aggregation group (LAG). When linkType is 10 + // Gbps, requestedLinkCount cannot exceed max_lag_size_10_gbps. + MaxLagSize10Gbps int64 `json:"maxLagSize10Gbps,omitempty"` - // Name: Name of the resource. The name must be 1-63 characters long and - // comply with RFC1035. + // Name: [Output Only] Name of the resource. Name string `json:"name,omitempty"` - ResourceRequirements *LicenseResourceRequirements `json:"resourceRequirements,omitempty"` + // PeeringdbFacilityId: [Output Only] The peeringdb identifier for this + // facility (corresponding with a netfac type in peeringdb). + PeeringdbFacilityId string `json:"peeringdbFacilityId,omitempty"` + + // PermittedConnections: [Output Only] Permitted connections. + PermittedConnections []*InterconnectRemoteLocationPermittedConnections `json:"permittedConnections,omitempty"` + + // RemoteService: [Output Only] Indicates the service provider present + // at the remote location. Example values: "Amazon Web Services", + // "Microsoft Azure". + RemoteService string `json:"remoteService,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` - // Transferable: If false, licenses will not be copied from the source - // resource when creating an image from a disk, disk from snapshot, or - // snapshot from disk. - Transferable bool `json:"transferable,omitempty"` + // Status: [Output Only] The status of this InterconnectRemoteLocation, + // which can take one of the following values: - CLOSED: The + // InterconnectRemoteLocation is closed and is unavailable for + // provisioning new Cross-Cloud Interconnects. - AVAILABLE: The + // InterconnectRemoteLocation is available for provisioning new + // Cross-Cloud Interconnects. + // + // Possible values: + // "AVAILABLE" - The InterconnectRemoteLocation is available for + // provisioning new Cross-Cloud Interconnects. + // "CLOSED" - The InterconnectRemoteLocation is closed for + // provisioning new Cross-Cloud Interconnects. + Status string `json:"status,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "ChargesUseFee") to + // ForceSendFields is a list of field names (e.g. "Address") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -24231,144 +25283,101 @@ type License struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "ChargesUseFee") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
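// Sketch (hypothetical helper, not generated API): pre-validating an
// order against the Lacp and MaxLagSize* constraints documented above.
// The link-type strings follow the Interconnect linkType enum; assumes
// imports of "fmt" and compute "google.golang.org/api/compute/v1".
func checkLagSize(loc *compute.InterconnectRemoteLocation, linkType string, links int64) error {
	if loc.Lacp == "LACP_UNSUPPORTED" && links != 1 {
		return fmt.Errorf("%s: LAGs without LACP are unsupported; requestedLinkCount must be 1", loc.Name)
	}
	switch linkType {
	case "LINK_TYPE_ETHERNET_100G_LR":
		if links > loc.MaxLagSize100Gbps {
			return fmt.Errorf("%s: at most %d 100G links per LAG", loc.Name, loc.MaxLagSize100Gbps)
		}
	case "LINK_TYPE_ETHERNET_10G_LR":
		if links > loc.MaxLagSize10Gbps {
			return fmt.Errorf("%s: at most %d 10G links per LAG", loc.Name, loc.MaxLagSize10Gbps)
		}
	}
	return nil
}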
NullFields []string `json:"-"` } -func (s *License) MarshalJSON() ([]byte, error) { - type NoMethod License +func (s *InterconnectRemoteLocation) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LicenseCode: Represents a License Code resource. A License Code is a -// unique identifier used to represent a license resource. *Caution* -// This resource is intended for use only by third-party partners who -// are creating Cloud Marketplace images. -type LicenseCode struct { - // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text - // format. - CreationTimestamp string `json:"creationTimestamp,omitempty"` - - // Description: [Output Only] Description of this License Code. - Description string `json:"description,omitempty"` - - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. - Id uint64 `json:"id,omitempty,string"` - - // Kind: [Output Only] Type of resource. Always compute#licenseCode for - // licenses. - Kind string `json:"kind,omitempty"` - - // LicenseAlias: [Output Only] URL and description aliases of Licenses - // with the same License Code. - LicenseAlias []*LicenseCodeLicenseAlias `json:"licenseAlias,omitempty"` - - // Name: [Output Only] Name of the resource. The name is 1-20 characters - // long and must be a valid 64 bit integer. - Name string `json:"name,omitempty"` - - // SelfLink: [Output Only] Server-defined URL for the resource. - SelfLink string `json:"selfLink,omitempty"` - - // State: [Output Only] Current state of this License Code. +type InterconnectRemoteLocationConstraints struct { + // PortPairRemoteLocation: [Output Only] Port pair remote location + // constraints, which can take one of the following values: + // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, + // PORT_PAIR_MATCHING_REMOTE_LOCATION. GCP's API refers only to + // individual ports, but the UI uses this field when ordering a pair of + // ports, to prevent users from accidentally ordering something that is + // incompatible with their cloud provider. Specifically, when ordering a + // redundant pair of Cross-Cloud Interconnect ports, and one of them + // uses a remote location with portPairMatchingRemoteLocation set to + // matching, the UI requires that both ports use the same remote + // location. // // Possible values: - // "DISABLED" - Machines are not allowed to attach boot disks with - // this License Code. Requests to create new resources with this license - // will be rejected. - // "ENABLED" - Use is allowed for anyone with USE_READ_ONLY access to - // this License Code. - // "RESTRICTED" - Use of this license is limited to a project - // whitelist. - // "STATE_UNSPECIFIED" - // "TERMINATED" - Reserved state. - State string `json:"state,omitempty"` - - // Transferable: [Output Only] If true, the license will remain attached - // when creating images or snapshots from disks. Otherwise, the license - // is not transferred. - Transferable bool `json:"transferable,omitempty"` - - // ServerResponse contains the HTTP response code and headers from the - // server. - googleapi.ServerResponse `json:"-"` + // "PORT_PAIR_MATCHING_REMOTE_LOCATION" - If + // PORT_PAIR_MATCHING_REMOTE_LOCATION, the remote cloud provider + // allocates ports in pairs, and the user should choose the same remote + // location for both ports. 
+ // "PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION" - If + // PORT_PAIR_UNCONSTRAINED_REMOTE_LOCATION, a user may opt to provision + // a redundant pair of Cross-Cloud Interconnects using two different + // remote locations in the same city. + PortPairRemoteLocation string `json:"portPairRemoteLocation,omitempty"` + + // PortPairVlan: [Output Only] Port pair VLAN constraints, which can + // take one of the following values: PORT_PAIR_UNCONSTRAINED_VLAN, + // PORT_PAIR_MATCHING_VLAN + // + // Possible values: + // "PORT_PAIR_MATCHING_VLAN" - If PORT_PAIR_MATCHING_VLAN, the + // Interconnect for this attachment is part of a pair of ports that + // should have matching VLAN allocations. This occurs with Cross-Cloud + // Interconnect to Azure remote locations. While GCP's API does not + // explicitly group pairs of ports, the UI uses this field to ensure + // matching VLAN ids when configuring a redundant VLAN pair. + // "PORT_PAIR_UNCONSTRAINED_VLAN" - PORT_PAIR_UNCONSTRAINED_VLAN means + // there is no constraint. + PortPairVlan string `json:"portPairVlan,omitempty"` + + // SubnetLengthRange: [Output Only] [min-length, max-length] The minimum + // and maximum value (inclusive) for the IPv4 subnet length. For + // example, an interconnectRemoteLocation for Azure has {min: 30, max: + // 30} because Azure requires /30 subnets. This range specifies the + // values supported by both cloud providers. Interconnect currently + // supports /29 and /30 IPv4 subnet lengths. If a remote cloud has no + // constraint on IPv4 subnet length, the range would thus be {min: 29, + // max: 30}. + SubnetLengthRange *InterconnectRemoteLocationConstraintsSubnetLengthRange `json:"subnetLengthRange,omitempty"` - // ForceSendFields is a list of field names (e.g. "CreationTimestamp") - // to unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "PortPairRemoteLocation") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "CreationTimestamp") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the + // NullFields is a list of field names (e.g. "PortPairRemoteLocation") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. 
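// Sketch (hypothetical helper): enforcing the pairing and subnet-length
// constraints described above when ordering a redundant pair. `prefix`
// is the desired IPv4 subnet length (e.g. 29 or 30); assumes imports of
// "fmt" and compute "google.golang.org/api/compute/v1".
func checkConstraints(c *compute.InterconnectRemoteLocationConstraints, locA, locB string, prefix int64) error {
	if c.PortPairRemoteLocation == "PORT_PAIR_MATCHING_REMOTE_LOCATION" && locA != locB {
		return fmt.Errorf("remote cloud allocates ports in pairs: both ports must use the same remote location")
	}
	if r := c.SubnetLengthRange; r != nil && (prefix < r.Min || prefix > r.Max) {
		return fmt.Errorf("subnet length /%d outside supported range [/%d, /%d]", prefix, r.Min, r.Max)
	}
	return nil
}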
NullFields []string `json:"-"` } -func (s *LicenseCode) MarshalJSON() ([]byte, error) { - type NoMethod LicenseCode - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type LicenseCodeLicenseAlias struct { - // Description: [Output Only] Description of this License Code. - Description string `json:"description,omitempty"` - - // SelfLink: [Output Only] URL of license corresponding to this License - // Code. - SelfLink string `json:"selfLink,omitempty"` - - // ForceSendFields is a list of field names (e.g. "Description") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "Description") to include - // in API requests with the JSON null value. By default, fields with - // empty values are omitted from API requests. However, any field with - // an empty value appearing in NullFields will be sent to the server as - // null. It is an error if a field in this list has a non-empty value. - // This may be used to include null fields in Patch requests. - NullFields []string `json:"-"` -} - -func (s *LicenseCodeLicenseAlias) MarshalJSON() ([]byte, error) { - type NoMethod LicenseCodeLicenseAlias +func (s *InterconnectRemoteLocationConstraints) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationConstraints raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// LicenseResourceCommitment: Commitment for a particular license -// resource. -type LicenseResourceCommitment struct { - // Amount: The number of licenses purchased. - Amount int64 `json:"amount,omitempty,string"` - - // CoresPerLicense: Specifies the core range of the instance for which - // this license applies. - CoresPerLicense string `json:"coresPerLicense,omitempty"` +type InterconnectRemoteLocationConstraintsSubnetLengthRange struct { + Max int64 `json:"max,omitempty"` - // License: Any applicable license URI. - License string `json:"license,omitempty"` + Min int64 `json:"min,omitempty"` - // ForceSendFields is a list of field names (e.g. "Amount") to + // ForceSendFields is a list of field names (e.g. "Max") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any // non-pointer, non-interface field appearing in ForceSendFields will be @@ -24376,7 +25385,7 @@ type LicenseResourceCommitment struct { // This may be used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "Amount") to include in API + // NullFields is a list of field names (e.g. "Max") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -24385,66 +25394,534 @@ type LicenseResourceCommitment struct { NullFields []string `json:"-"` } -func (s *LicenseResourceCommitment) MarshalJSON() ([]byte, error) { - type NoMethod LicenseResourceCommitment - raw := NoMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) -} - -type LicenseResourceRequirements struct { - // MinGuestCpuCount: Minimum number of guest cpus required to use the - // Instance. Enforced at Instance creation and Instance start. - MinGuestCpuCount int64 `json:"minGuestCpuCount,omitempty"` - - // MinMemoryMb: Minimum memory required to use the Instance. Enforced at - // Instance creation and Instance start. - MinMemoryMb int64 `json:"minMemoryMb,omitempty"` - - // ForceSendFields is a list of field names (e.g. "MinGuestCpuCount") to - // unconditionally include in API requests. By default, fields with - // empty or default values are omitted from API requests. However, any - // non-pointer, non-interface field appearing in ForceSendFields will be - // sent to the server regardless of whether the field is empty or not. - // This may be used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` - - // NullFields is a list of field names (e.g. "MinGuestCpuCount") to - // include in API requests with the JSON null value. By default, fields - // with empty values are omitted from API requests. However, any field - // with an empty value appearing in NullFields will be sent to the - // server as null. It is an error if a field in this list has a - // non-empty value. This may be used to include null fields in Patch - // requests. - NullFields []string `json:"-"` -} - -func (s *LicenseResourceRequirements) MarshalJSON() ([]byte, error) { - type NoMethod LicenseResourceRequirements +func (s *InterconnectRemoteLocationConstraintsSubnetLengthRange) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationConstraintsSubnetLengthRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -type LicensesListResponse struct { +// InterconnectRemoteLocationList: Response to the list request, and +// contains a list of interconnect remote locations. +type InterconnectRemoteLocationList struct { // Id: [Output Only] Unique identifier for the resource; defined by the // server. Id string `json:"id,omitempty"` - // Items: A list of License resources. - Items []*License `json:"items,omitempty"` + // Items: A list of InterconnectRemoteLocation resources. + Items []*InterconnectRemoteLocation `json:"items,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. + // Kind: [Output Only] Type of resource. Always + // compute#interconnectRemoteLocationList for lists of interconnect + // remote locations. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token lets you get the next page of + // results for list requests. If the number of results is larger than + // maxResults, use the nextPageToken as a value for the query parameter + // pageToken in the next list request. 
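// Sketch: walking every page of results. The generated list calls
// follow NextPageToken automatically via their Pages helper, so the
// manual pageToken loop described above is optional. Assumes the
// InterconnectRemoteLocations.List call generated for the list type
// defined here; svc is an initialized *compute.Service and
// "my-project" is a placeholder.
func listRemoteLocations(ctx context.Context, svc *compute.Service) error {
	return svc.InterconnectRemoteLocations.List("my-project").
		Pages(ctx, func(page *compute.InterconnectRemoteLocationList) error {
			for _, loc := range page.Items {
				fmt.Println(loc.Name, loc.RemoteService)
			}
			return nil // returning non-nil stops the iteration early
		})
}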
Subsequent list requests will + // have their own nextPageToken to continue paging through the results. NextPageToken string `json:"nextPageToken,omitempty"` // SelfLink: [Output Only] Server-defined URL for this resource. SelfLink string `json:"selfLink,omitempty"` // Warning: [Output Only] Informational warning message. - Warning *LicensesListResponseWarning `json:"warning,omitempty"` + Warning *InterconnectRemoteLocationListWarning `json:"warning,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Id") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectRemoteLocationList) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationList + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectRemoteLocationListWarning: [Output Only] Informational +// warning message. +type InterconnectRemoteLocationListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" - Warning about failed cleanup of transient + // changes made by a failed operation. + // "DEPRECATED_RESOURCE_USED" - A link to a deprecated resource was + // created. + // "DEPRECATED_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as deprecated + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" - The user created a boot disk + // that is larger than image size. + // "EXPERIMENTAL_TYPE_USED" - When deploying and at least one of the + // resources has a type marked as experimental + // "EXTERNAL_API_WARNING" - Warning that is present in an external api + // call + // "FIELD_VALUE_OVERRIDEN" - Warning that value of a field has been + // overridden. Deprecated unused field. + // "INJECTED_KERNELS_DEPRECATED" - The operation involved use of an + // injected kernel, which is deprecated. + // "INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB" - A WEIGHTED_MAGLEV + // backend service is associated with a health check that is not of type + // HTTP/HTTPS/HTTP2. + // "LARGE_DEPLOYMENT_WARNING" - When deploying a deployment with a + // exceedingly large number of resources + // "MISSING_TYPE_DEPENDENCY" - A resource depends on a missing type + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" - The route's nextHopIp address is + // not assigned to an instance on the network. + // "NEXT_HOP_CANNOT_IP_FORWARD" - The route's next hop instance cannot + // ip forward. 
+ // "NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE" - The route's + // nextHopInstance URL refers to an instance that does not have an ipv6 + // interface on the same network as the route. + // "NEXT_HOP_INSTANCE_NOT_FOUND" - The route's nextHopInstance URL + // refers to an instance that does not exist. + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" - The route's nextHopInstance + // URL refers to an instance that is not on the same network as the + // route. + // "NEXT_HOP_NOT_RUNNING" - The route's next hop instance does not + // have a status of RUNNING. + // "NOT_CRITICAL_ERROR" - Error which is not critical. We decided to + // continue the process despite the mentioned error. + // "NO_RESULTS_ON_PAGE" - No results are present on a particular list + // page. + // "PARTIAL_SUCCESS" - Success is reported, but some results may be + // missing due to errors + // "REQUIRED_TOS_AGREEMENT" - The user attempted to use a resource + // that requires a TOS they have not accepted. + // "RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING" - Warning that a + // resource is in use. + // "RESOURCE_NOT_DELETED" - One or more of the resources set to + // auto-delete could not be deleted because they were in use. + // "SCHEMA_VALIDATION_IGNORED" - When a resource schema validation is + // ignored. + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" - Instance template used in + // instance group manager is valid as such, but its application does not + // make a lot of sense, because it allows only single instance in + // instance group. + // "UNDECLARED_PROPERTIES" - When undeclared properties in the schema + // are present + // "UNREACHABLE" - A given scope cannot be reached. + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. + // For example: "data": [ { "key": "scope", "value": "zones/us-east1-d" + // } + Data []*InterconnectRemoteLocationListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectRemoteLocationListWarning) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationListWarning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectRemoteLocationListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. 
Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Key") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectRemoteLocationListWarningData) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationListWarningData + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type InterconnectRemoteLocationPermittedConnections struct { + // InterconnectLocation: [Output Only] URL of an Interconnect location + // that is permitted to connect to this Interconnect remote location. + InterconnectLocation string `json:"interconnectLocation,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "InterconnectLocation") to unconditionally include in API requests. + // By default, fields with empty or default values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "InterconnectLocation") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectRemoteLocationPermittedConnections) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectRemoteLocationPermittedConnections + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// InterconnectsGetDiagnosticsResponse: Response for the +// InterconnectsGetDiagnosticsRequest. +type InterconnectsGetDiagnosticsResponse struct { + Result *InterconnectDiagnostics `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Result") to + // unconditionally include in API requests. 
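// Sketch: the round trip for the response type above, via the
// Interconnects.GetDiagnostics call in this package. Project and
// interconnect names are placeholders; assumes imports of "context",
// "fmt", and compute "google.golang.org/api/compute/v1".
func showDiagnostics(ctx context.Context, svc *compute.Service) error {
	resp, err := svc.Interconnects.GetDiagnostics("my-project", "my-interconnect").Context(ctx).Do()
	if err != nil {
		return err
	}
	diag := resp.Result // *compute.InterconnectDiagnostics
	for _, link := range diag.Links {
		fmt.Println(link.CircuitId, link.GoogleDemarc)
	}
	return nil
}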
By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Result") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *InterconnectsGetDiagnosticsResponse) MarshalJSON() ([]byte, error) { + type NoMethod InterconnectsGetDiagnosticsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// License: Represents a License resource. A License represents billing +// and aggregate usage data for public and marketplace images. *Caution* +// This resource is intended for use only by third-party partners who +// are creating Cloud Marketplace images. +type License struct { + // ChargesUseFee: [Output Only] Deprecated. This field no longer + // reflects whether a license charges a usage fee. + ChargesUseFee bool `json:"chargesUseFee,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional textual description of the resource; + // provided by the client when the resource is created. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#license for + // licenses. + Kind string `json:"kind,omitempty"` + + // LicenseCode: [Output Only] The unique code used to attach this + // license to images, snapshots, and disks. + LicenseCode uint64 `json:"licenseCode,omitempty,string"` + + // Name: Name of the resource. The name must be 1-63 characters long and + // comply with RFC1035. + Name string `json:"name,omitempty"` + + ResourceRequirements *LicenseResourceRequirements `json:"resourceRequirements,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // Transferable: If false, licenses will not be copied from the source + // resource when creating an image from a disk, disk from snapshot, or + // snapshot from disk. + Transferable bool `json:"transferable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "ChargesUseFee") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"ChargesUseFee") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *License) MarshalJSON() ([]byte, error) { + type NoMethod License + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LicenseCode: Represents a License Code resource. A License Code is a +// unique identifier used to represent a license resource. *Caution* +// This resource is intended for use only by third-party partners who +// are creating Cloud Marketplace images. +type LicenseCode struct { + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: [Output Only] Description of this License Code. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + // Kind: [Output Only] Type of resource. Always compute#licenseCode for + // licenses. + Kind string `json:"kind,omitempty"` + + // LicenseAlias: [Output Only] URL and description aliases of Licenses + // with the same License Code. + LicenseAlias []*LicenseCodeLicenseAlias `json:"licenseAlias,omitempty"` + + // Name: [Output Only] Name of the resource. The name is 1-20 characters + // long and must be a valid 64 bit integer. + Name string `json:"name,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // State: [Output Only] Current state of this License Code. + // + // Possible values: + // "DISABLED" - Machines are not allowed to attach boot disks with + // this License Code. Requests to create new resources with this license + // will be rejected. + // "ENABLED" - Use is allowed for anyone with USE_READ_ONLY access to + // this License Code. + // "RESTRICTED" - Use of this license is limited to a project + // whitelist. + // "STATE_UNSPECIFIED" + // "TERMINATED" - Reserved state. + State string `json:"state,omitempty"` + + // Transferable: [Output Only] If true, the license will remain attached + // when creating images or snapshots from disks. Otherwise, the license + // is not transferred. + Transferable bool `json:"transferable,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CreationTimestamp") + // to unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreationTimestamp") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. 
However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LicenseCode) MarshalJSON() ([]byte, error) { + type NoMethod LicenseCode + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicenseCodeLicenseAlias struct { + // Description: [Output Only] Description of this License Code. + Description string `json:"description,omitempty"` + + // SelfLink: [Output Only] URL of license corresponding to this License + // Code. + SelfLink string `json:"selfLink,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *LicenseCodeLicenseAlias) MarshalJSON() ([]byte, error) { + type NoMethod LicenseCodeLicenseAlias + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// LicenseResourceCommitment: Commitment for a particular license +// resource. +type LicenseResourceCommitment struct { + // Amount: The number of licenses purchased. + Amount int64 `json:"amount,omitempty,string"` + + // CoresPerLicense: Specifies the core range of the instance for which + // this license applies. + CoresPerLicense string `json:"coresPerLicense,omitempty"` + + // License: Any applicable license URI. + License string `json:"license,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Amount") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Amount") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *LicenseResourceCommitment) MarshalJSON() ([]byte, error) { + type NoMethod LicenseResourceCommitment + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicenseResourceRequirements struct { + // MinGuestCpuCount: Minimum number of guest cpus required to use the + // Instance. Enforced at Instance creation and Instance start. + MinGuestCpuCount int64 `json:"minGuestCpuCount,omitempty"` + + // MinMemoryMb: Minimum memory required to use the Instance. Enforced at + // Instance creation and Instance start. + MinMemoryMb int64 `json:"minMemoryMb,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MinGuestCpuCount") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MinGuestCpuCount") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *LicenseResourceRequirements) MarshalJSON() ([]byte, error) { + type NoMethod LicenseResourceRequirements + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type LicensesListResponse struct { + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. + Id string `json:"id,omitempty"` + + // Items: A list of License resources. + Items []*License `json:"items,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // Warning: [Output Only] Informational warning message. + Warning *LicensesListResponseWarning `json:"warning,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. @@ -25390,7 +26867,7 @@ type MachineTypeAccelerators struct { GuestAcceleratorCount int64 `json:"guestAcceleratorCount,omitempty"` // GuestAcceleratorType: The accelerator type resource name, not a full - // URL, e.g. 'nvidia-tesla-k80'. + // URL, e.g. nvidia-tesla-t4. GuestAcceleratorType string `json:"guestAcceleratorType,omitempty"` // ForceSendFields is a list of field names (e.g. @@ -26072,9 +27549,9 @@ type ManagedInstance struct { // is empty when the instance does not exist. // // Possible values: - // "DEPROVISIONING" - The Nanny is halted and we are performing tear - // down tasks like network deprogramming, releasing quota, IP, tearing - // down disks etc. 
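// Sketch: the manual NextPageToken loop that the comment above
// describes, for callers that do not use the Pages helper. svc is an
// initialized *compute.Service and "my-project" is a placeholder;
// assumes imports of "context", "fmt", and
// compute "google.golang.org/api/compute/v1".
func listLicenses(ctx context.Context, svc *compute.Service) error {
	token := ""
	for {
		call := svc.Licenses.List("my-project").Context(ctx)
		if token != "" {
			call = call.PageToken(token)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		for _, lic := range page.Items {
			fmt.Println(lic.Name, lic.LicenseCode)
		}
		if token = page.NextPageToken; token == "" {
			return nil // empty token: this was the last page
		}
	}
}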
+ // "DEPROVISIONING" - The instance is halted and we are performing + // tear down tasks like network deprogramming, releasing quota, IP, + // tearing down disks etc. // "PROVISIONING" - Resources are being allocated for the instance. // "REPAIRING" - The instance is in repair. // "RUNNING" - The instance is running. @@ -26584,7 +28061,7 @@ type Network struct { FirewallPolicy string `json:"firewallPolicy,omitempty"` // GatewayIPv4: [Output Only] The gateway address for default routing - // out of the network, selected by GCP. + // out of the network, selected by Google Cloud. GatewayIPv4 string `json:"gatewayIPv4,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -26696,10 +28173,9 @@ type NetworkAttachment struct { // property when you create the resource. Description string `json:"description,omitempty"` - // Fingerprint: [Output Only] Fingerprint of this resource. A hash of - // the contents stored in this object. This field is used in optimistic - // locking. An up-to-date fingerprint must be provided in order to - // patch. + // Fingerprint: Fingerprint of this resource. A hash of the contents + // stored in this object. This field is used in optimistic locking. An + // up-to-date fingerprint must be provided in order to patch. Fingerprint string `json:"fingerprint,omitempty"` // Id: [Output Only] The unique identifier for the resource type. The @@ -26719,7 +28195,11 @@ type NetworkAttachment struct { Name string `json:"name,omitempty"` // Network: [Output Only] The URL of the network which the Network - // Attachment belongs to. + // Attachment belongs to. Practically it is inferred by fetching the + // network of the first subnetwork associated. Because it is required + // that all the subnetworks must be from the same network, it is assured + // that the Network Attachment belongs to the same network as all the + // subnetworks. Network string `json:"network,omitempty"` // ProducerAcceptLists: Projects that are allowed to connect to this @@ -26970,7 +28450,7 @@ func (s *NetworkAttachmentAggregatedListWarningData) MarshalJSON() ([]byte, erro // NetworkAttachmentConnectedEndpoint: [Output Only] A connection // connected to this network attachment. type NetworkAttachmentConnectedEndpoint struct { - // IpAddress: The IP address assigned to the producer instance network + // IpAddress: The IPv4 address assigned to the producer instance network // interface. This value will be a range in case of Serverless. IpAddress string `json:"ipAddress,omitempty"` @@ -26978,7 +28458,7 @@ type NetworkAttachmentConnectedEndpoint struct { // the IP was assigned. ProjectIdOrNum string `json:"projectIdOrNum,omitempty"` - // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork + // SecondaryIpCidrRanges: Alias IP ranges from the same subnetwork. SecondaryIpCidrRanges []string `json:"secondaryIpCidrRanges,omitempty"` // Status: The status of a connected endpoint to this network @@ -29108,7 +30588,10 @@ type NetworkInterface struct { Ipv6AccessType string `json:"ipv6AccessType,omitempty"` // Ipv6Address: An IPv6 internal network address for this network - // interface. + // interface. To use a static internal IP address, it must be unused and + // in the same region as the instance's zone. If not specified, Google + // Cloud will automatically assign an internal IPv6 address from the + // instance's subnetwork. Ipv6Address string `json:"ipv6Address,omitempty"` // Kind: [Output Only] Type of the resource. 
Always @@ -29134,6 +30617,12 @@ type NetworkInterface struct { // global/networks/default Network string `json:"network,omitempty"` + // NetworkAttachment: The URL of the network attachment that this + // interface should connect to in the following format: + // projects/{project_number}/regions/{region_name}/networkAttachments/{ne + // twork_attachment_name}. + NetworkAttachment string `json:"networkAttachment,omitempty"` + // NetworkIP: An IPv4 internal IP address to assign to the instance for // this network interface. If not specified by the user, an unused // internal IP is assigned by the system. @@ -29153,10 +30642,11 @@ type NetworkInterface struct { // number. It'll be empty if not specified by the users. QueueCount int64 `json:"queueCount,omitempty"` - // StackType: The stack type for this network interface to identify - // whether the IPv6 feature is enabled or not. If not specified, - // IPV4_ONLY will be used. This field can be both set at instance - // creation and update network interface operations. + // StackType: The stack type for this network interface. To assign only + // IPv4 addresses, use IPV4_ONLY. To assign both IPv4 and IPv6 + // addresses, use IPV4_IPV6. If not specified, IPV4_ONLY is used. This + // field can be both set at instance creation and update network + // interface operations. // // Possible values: // "IPV4_IPV6" - The network interface can have both IPv4 and IPv6 @@ -30843,6 +32333,33 @@ func (s *NodeGroupsSetNodeTemplateRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type NodeGroupsSimulateMaintenanceEventRequest struct { + // Nodes: Names of the nodes to go under maintenance simulation. + Nodes []string `json:"nodes,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Nodes") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Nodes") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *NodeGroupsSimulateMaintenanceEventRequest) MarshalJSON() ([]byte, error) { + type NoMethod NodeGroupsSimulateMaintenanceEventRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // NodeTemplate: Represent a sole-tenant Node Template resource. You can // use a template to define properties for nodes in a node group. For // more information, read Creating node groups and instances. @@ -30893,11 +32410,7 @@ type NodeTemplate struct { // this template. NodeType string `json:"nodeType,omitempty"` - // NodeTypeFlexibility: The flexible properties of the desired node - // type. Node groups that use this node template will create nodes of a - // type that matches these properties. This field is mutually exclusive - // with the node_type property; you can only define one or the other, - // but not both. 
+ // NodeTypeFlexibility: Do not use. Instead, use the node_type property. NodeTypeFlexibility *NodeTemplateNodeTypeFlexibility `json:"nodeTypeFlexibility,omitempty"` // Region: [Output Only] The name of the region where the node template @@ -36258,6 +37771,7 @@ type Quota struct { // "COMMITTED_NVIDIA_A100_80GB_GPUS" // "COMMITTED_NVIDIA_A100_GPUS" // "COMMITTED_NVIDIA_K80_GPUS" + // "COMMITTED_NVIDIA_L4_GPUS" // "COMMITTED_NVIDIA_P100_GPUS" // "COMMITTED_NVIDIA_P4_GPUS" // "COMMITTED_NVIDIA_T4_GPUS" @@ -36306,13 +37820,18 @@ type Quota struct { // "N2D_CPUS" // "N2_CPUS" // "NETWORKS" + // "NETWORK_ATTACHMENTS" // "NETWORK_ENDPOINT_GROUPS" // "NETWORK_FIREWALL_POLICIES" + // "NET_LB_SECURITY_POLICIES_PER_REGION" + // "NET_LB_SECURITY_POLICY_RULES_PER_REGION" + // "NET_LB_SECURITY_POLICY_RULE_ATTRIBUTES_PER_REGION" // "NODE_GROUPS" // "NODE_TEMPLATES" // "NVIDIA_A100_80GB_GPUS" // "NVIDIA_A100_GPUS" // "NVIDIA_K80_GPUS" + // "NVIDIA_L4_GPUS" // "NVIDIA_P100_GPUS" // "NVIDIA_P100_VWS_GPUS" // "NVIDIA_P4_GPUS" @@ -36327,6 +37846,7 @@ type Quota struct { // "PREEMPTIBLE_NVIDIA_A100_80GB_GPUS" // "PREEMPTIBLE_NVIDIA_A100_GPUS" // "PREEMPTIBLE_NVIDIA_K80_GPUS" + // "PREEMPTIBLE_NVIDIA_L4_GPUS" // "PREEMPTIBLE_NVIDIA_P100_GPUS" // "PREEMPTIBLE_NVIDIA_P100_VWS_GPUS" // "PREEMPTIBLE_NVIDIA_P4_GPUS" @@ -36350,6 +37870,7 @@ type Quota struct { // "ROUTES" // "SECURITY_POLICIES" // "SECURITY_POLICIES_PER_REGION" + // "SECURITY_POLICY_ADVANCED_RULES_PER_REGION" // "SECURITY_POLICY_CEVAL_RULES" // "SECURITY_POLICY_RULES" // "SECURITY_POLICY_RULES_PER_REGION" @@ -36589,6 +38110,44 @@ func (s *Region) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type RegionAddressesMoveRequest struct { + // Description: An optional destination address description if intended + // to be different from the source. + Description string `json:"description,omitempty"` + + // DestinationAddress: The URL of the destination address to move to. + // This can be a full or partial URL. For example, the following are all + // valid URLs to a address: - + // https://www.googleapis.com/compute/v1/projects/project/regions/region + // /addresses/address - + // projects/project/regions/region/addresses/address Note that + // destination project must be different from the source project. So + // /regions/region/addresses/address is not valid partial url. + DestinationAddress string `json:"destinationAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
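A short construction sketch for the move request being defined here (placeholder names; per the field doc, the destination project must differ from the source, and a partial URL must include the projects/ prefix). A full call flow, including the request-ID retry semantics, appears after the generated Move method later in this section.

// Illustrative only; my-addr, project-a, and project-b are placeholders.
func exampleMoveRequest() *compute.RegionAddressesMoveRequest {
	return &compute.RegionAddressesMoveRequest{
		Description: "moved from project-a",
		// The full-URL form https://www.googleapis.com/compute/v1/projects/
		// project-b/regions/us-central1/addresses/my-addr is also accepted.
		DestinationAddress: "projects/project-b/regions/us-central1/addresses/my-addr",
	}
}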
+ NullFields []string `json:"-"` +} + +func (s *RegionAddressesMoveRequest) MarshalJSON() ([]byte, error) { + type NoMethod RegionAddressesMoveRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // RegionAutoscalerList: Contains a list of autoscalers. type RegionAutoscalerList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -38776,6 +40335,10 @@ type Reservation struct { // This is to define placement policy with reservation. ResourcePolicies map[string]string `json:"resourcePolicies,omitempty"` + // ResourceStatus: [Output Only] Status information for Reservation + // resource. + ResourceStatus *AllocationResourceStatus `json:"resourceStatus,omitempty"` + // SatisfiesPzs: [Output Only] Reserved for future use. SatisfiesPzs bool `json:"satisfiesPzs,omitempty"` @@ -40135,7 +41698,7 @@ type ResourcePolicyInstanceSchedulePolicy struct { // TimeZone: Specifies the time zone to be used in interpreting // Schedule.schedule. The value of this field must be a time zone name - // from the tz database: http://en.wikipedia.org/wiki/Tz_database. + // from the tz database: https://wikipedia.org/wiki/Tz_database. TimeZone string `json:"timeZone,omitempty"` // VmStartSchedule: Specifies the schedule for starting instances. @@ -41625,9 +43188,9 @@ type RouterBgpPeer struct { AdvertiseMode string `json:"advertiseMode,omitempty"` // AdvertisedGroups: User-specified list of prefix groups to advertise - // in custom mode, which can take one of the following options: - - // ALL_SUBNETS: Advertises all available subnets, including peer VPC - // subnets. - ALL_VPC_SUBNETS: Advertises the router's own VPC subnets. + // in custom mode, which currently supports the following option: - + // ALL_SUBNETS: Advertises all of the router's own VPC subnets. This + // excludes any routes learned for subnets that use VPC Network Peering. // Note that this field can only be populated if advertise_mode is // CUSTOM and overrides the list defined for the router (in the "bgp" // message). These groups are advertised in addition to any specified @@ -42226,10 +43789,9 @@ type RouterNat struct { // in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A list // of Subnetworks are allowed to Nat (specified in the field subnetwork // below) The default is SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. - // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or - // ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any - // other Router.Nat section in any Router for this network in this - // region. + // Note that if this field contains ALL_SUBNETWORKS_ALL_IP_RANGES then + // there should not be any other Router.Nat section in any Router for + // this network in this region. // // Possible values: // "ALL_SUBNETWORKS_ALL_IP_RANGES" - All the IP ranges in every @@ -43906,6 +45468,20 @@ type SecurityPolicy struct { // compute#securityPolicyfor security policies Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // security policy, which is essentially a hash of the labels set used + // for optimistic locking. The fingerprint is initially generated by + // Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels. To see the latest fingerprint, make + // get() request to the security policy. 
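The fingerprint fields above all follow the same read-modify-write pattern. A sketch of that pattern using the regional addresses.setLabels method defined later in this same file (placeholder identifiers; credentials come from Application Default Credentials):

package main

import (
	"context"
	"log"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	addr, err := svc.Addresses.Get("my-project", "us-central1", "my-addr").Do()
	if err != nil {
		log.Fatal(err)
	}
	// Echo back the fingerprint we just read; a stale fingerprint fails
	// with 412 conditionNotMet, in which case re-read and retry.
	_, err = svc.Addresses.SetLabels("my-project", "us-central1", "my-addr",
		&compute.RegionSetLabelsRequest{
			LabelFingerprint: addr.LabelFingerprint,
			Labels:           map[string]string{"env": "prod"},
		}).Do()
	if err != nil {
		log.Fatal(err)
	}
}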
+ LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -43945,7 +45521,12 @@ type SecurityPolicy struct { // internal service policies can be configured to filter HTTP requests // targeting services managed by Traffic Director in a service mesh. // They filter requests before the request is served from the - // application. This field can be set only at resource creation time. + // application. - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can + // be configured to filter packets targeting network load balancing + // resources such as backend services, target pools, target instances, + // and instances with external IPs. They filter requests before the + // request is served from the application. This field can be set only at + // resource creation time. // // Possible values: // "CLOUD_ARMOR" @@ -44015,13 +45596,17 @@ func (s *SecurityPolicyAdaptiveProtectionConfig) MarshalJSON() ([]byte, error) { } // SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig: -// Configuration options for L7 DDoS detection. +// Configuration options for L7 DDoS detection. This field is only +// supported in Global Security Policies of type CLOUD_ARMOR. type SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig struct { - // Enable: If set to true, enables CAAP for L7 DDoS detection. + // Enable: If set to true, enables CAAP for L7 DDoS detection. This + // field is only supported in Global Security Policies of type + // CLOUD_ARMOR. Enable bool `json:"enable,omitempty"` // RuleVisibility: Rule visibility can be one of the following: STANDARD - // - opaque rules. (default) PREMIUM - transparent rules. + // - opaque rules. (default) PREMIUM - transparent rules. This field is + // only supported in Global Security Policies of type CLOUD_ARMOR. // // Possible values: // "PREMIUM" @@ -44344,7 +45929,8 @@ type SecurityPolicyRecaptchaOptionsConfig struct { // GOOGLE_RECAPTCHA under the security policy. The specified site key // needs to be created from the reCAPTCHA API. The user is responsible // for the validity of the specified site key. If not specified, a - // Google-managed site key is used. + // Google-managed site key is used. This field is only supported in + // Global Security Policies of type CLOUD_ARMOR. RedirectSiteKey string `json:"redirectSiteKey,omitempty"` // ForceSendFields is a list of field names (e.g. "RedirectSiteKey") to @@ -44403,18 +45989,20 @@ func (s *SecurityPolicyReference) MarshalJSON() ([]byte, error) { // matches this condition (allow or deny). type SecurityPolicyRule struct { // Action: The Action to perform when the rule is matched. The following - // are the valid actions: - allow: allow access to target. - deny(): - // deny access to target, returns the HTTP response code specified - // (valid values are 403, 404, and 502). - rate_based_ban: limit client - // traffic to the configured threshold and ban the client if the traffic - // exceeds the threshold. Configure parameters for this action in - // RateLimitOptions. Requires rate_limit_options to be set. 
- redirect: - // redirect to a different target. This can either be an internal - // reCAPTCHA redirect, or an external URL-based redirect via a 302 - // response. Parameters for this action can be configured via - // redirectOptions. - throttle: limit client traffic to the configured - // threshold. Configure parameters for this action in rateLimitOptions. - // Requires rate_limit_options to be set for this. + // are the valid actions: - allow: allow access to target. - + // deny(STATUS): deny access to target, returns the HTTP response code + // specified. Valid values for `STATUS` are 403, 404, and 502. - + // rate_based_ban: limit client traffic to the configured threshold and + // ban the client if the traffic exceeds the threshold. Configure + // parameters for this action in RateLimitOptions. Requires + // rate_limit_options to be set. - redirect: redirect to a different + // target. This can either be an internal reCAPTCHA redirect, or an + // external URL-based redirect via a 302 response. Parameters for this + // action can be configured via redirectOptions. This action is only + // supported in Global Security Policies of type CLOUD_ARMOR. - + // throttle: limit client traffic to the configured threshold. Configure + // parameters for this action in rateLimitOptions. Requires + // rate_limit_options to be set for this. Action string `json:"action,omitempty"` // Description: An optional description of this resource. Provide this @@ -44422,7 +46010,8 @@ type SecurityPolicyRule struct { Description string `json:"description,omitempty"` // HeaderAction: Optional, additional actions that are performed on - // headers. + // headers. This field is only supported in Global Security Policies of + // type CLOUD_ARMOR. HeaderAction *SecurityPolicyRuleHttpHeaderAction `json:"headerAction,omitempty"` // Kind: [Output only] Type of the resource. Always @@ -44433,6 +46022,12 @@ type SecurityPolicyRule struct { // If it evaluates to true, the corresponding 'action' is enforced. Match *SecurityPolicyRuleMatcher `json:"match,omitempty"` + // PreconfiguredWafConfig: Preconfigured WAF configuration to be applied + // for the rule. If the rule does not evaluate preconfigured WAF rules, + // i.e., if evaluatePreconfiguredWaf() is not used, this field will have + // no effect. + PreconfiguredWafConfig *SecurityPolicyRulePreconfiguredWafConfig `json:"preconfiguredWafConfig,omitempty"` + // Preview: If set to true, the specified action is not enforced. Preview bool `json:"preview,omitempty"` @@ -44447,7 +46042,8 @@ type SecurityPolicyRule struct { RateLimitOptions *SecurityPolicyRuleRateLimitOptions `json:"rateLimitOptions,omitempty"` // RedirectOptions: Parameters defining the redirect action. Cannot be - // specified for any other actions. + // specified for any other actions. This field is only supported in + // Global Security Policies of type CLOUD_ARMOR. RedirectOptions *SecurityPolicyRuleRedirectOptions `json:"redirectOptions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -44547,7 +46143,13 @@ type SecurityPolicyRuleMatcher struct { // Expr: User defined CEVAL expression. A CEVAL expression is used to // specify match criteria such as origin.ip, source.region_code and - // contents in the request header. + // contents in the request header. Expressions containing + // `evaluateThreatIntelligence` require Cloud Armor Managed Protection + // Plus tier and are not supported in Edge Policies nor in Regional + // Policies. 
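A sketch of the action syntax documented above: a rule that answers matching source ranges with a 302 redirect (per the docs, redirect is only supported in Global Security Policies of type CLOUD_ARMOR; all values here are illustrative):

func exampleRedirectRule() *compute.SecurityPolicyRule {
	return &compute.SecurityPolicyRule{
		Action:   "redirect",
		Priority: 1000,
		Match: &compute.SecurityPolicyRuleMatcher{
			VersionedExpr: "SRC_IPS_V1",
			Config: &compute.SecurityPolicyRuleMatcherConfig{
				SrcIpRanges: []string{"203.0.113.0/24"}, // TEST-NET-3 placeholder
			},
		},
		RedirectOptions: &compute.SecurityPolicyRuleRedirectOptions{
			Type:   "EXTERNAL_302",
			Target: "https://example.com/blocked",
		},
	}
}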
Expressions containing + // `evaluatePreconfiguredExpr('sourceiplist-*')` require Cloud Armor + // Managed Protection Plus tier and are only supported in Global + // Security Policies. Expr *Expr `json:"expr,omitempty"` // VersionedExpr: Preconfigured versioned expression. If this field is @@ -44611,6 +46213,130 @@ func (s *SecurityPolicyRuleMatcherConfig) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SecurityPolicyRulePreconfiguredWafConfig struct { + // Exclusions: A list of exclusions to apply during preconfigured WAF + // evaluation. + Exclusions []*SecurityPolicyRulePreconfiguredWafConfigExclusion `json:"exclusions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Exclusions") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Exclusions") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRulePreconfiguredWafConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRulePreconfiguredWafConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyRulePreconfiguredWafConfigExclusion struct { + // RequestCookiesToExclude: A list of request cookie names whose value + // will be excluded from inspection during preconfigured WAF evaluation. + RequestCookiesToExclude []*SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams `json:"requestCookiesToExclude,omitempty"` + + // RequestHeadersToExclude: A list of request header names whose value + // will be excluded from inspection during preconfigured WAF evaluation. + RequestHeadersToExclude []*SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams `json:"requestHeadersToExclude,omitempty"` + + // RequestQueryParamsToExclude: A list of request query parameter names + // whose value will be excluded from inspection during preconfigured WAF + // evaluation. Note that the parameter can be in the query string or in + // the POST body. + RequestQueryParamsToExclude []*SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams `json:"requestQueryParamsToExclude,omitempty"` + + // RequestUrisToExclude: A list of request URIs from the request line to + // be excluded from inspection during preconfigured WAF evaluation. When + // specifying this field, the query or fragment part should be excluded. + RequestUrisToExclude []*SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams `json:"requestUrisToExclude,omitempty"` + + // TargetRuleIds: A list of target rule IDs under the WAF rule set to + // apply the preconfigured WAF exclusion. If omitted, it refers to all + // the rule IDs under the WAF rule set. 
+ TargetRuleIds []string `json:"targetRuleIds,omitempty"` + + // TargetRuleSet: Target WAF rule set to apply the preconfigured WAF + // exclusion. + TargetRuleSet string `json:"targetRuleSet,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "RequestCookiesToExclude") to unconditionally include in API + // requests. By default, fields with empty or default values are omitted + // from API requests. However, any non-pointer, non-interface field + // appearing in ForceSendFields will be sent to the server regardless of + // whether the field is empty or not. This may be used to include empty + // fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RequestCookiesToExclude") + // to include in API requests with the JSON null value. By default, + // fields with empty values are omitted from API requests. However, any + // field with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRulePreconfiguredWafConfigExclusion) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRulePreconfiguredWafConfigExclusion + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams struct { + // Op: The match operator for the field. + // + // Possible values: + // "CONTAINS" - The operator matches if the field value contains the + // specified value. + // "ENDS_WITH" - The operator matches if the field value ends with the + // specified value. + // "EQUALS" - The operator matches if the field value equals the + // specified value. + // "EQUALS_ANY" - The operator matches if the field value is any + // value. + // "STARTS_WITH" - The operator matches if the field value starts with + // the specified value. + Op string `json:"op,omitempty"` + + // Val: The value of the field. + Val string `json:"val,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Op") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Op") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SecurityPolicyRuleRateLimitOptions struct { // BanDurationSec: Can only be specified if the action for the rule is // "rate_based_ban". 
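A sketch tying the exclusion types above together: skip inspection of one request header for two specific rule IDs of a preconfigured rule set (the rule-set and rule-ID strings are assumed examples of Cloud Armor's OWASP CRS naming, not values defined in this file):

func exampleWafExclusion() *compute.SecurityPolicyRulePreconfiguredWafConfig {
	return &compute.SecurityPolicyRulePreconfiguredWafConfig{
		Exclusions: []*compute.SecurityPolicyRulePreconfiguredWafConfigExclusion{{
			TargetRuleSet: "sqli-v33-stable", // assumed rule-set name
			TargetRuleIds: []string{          // omit to cover the whole rule set
				"owasp-crs-v030301-id942110-sqli",
				"owasp-crs-v030301-id942120-sqli",
			},
			RequestHeadersToExclude: []*compute.SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams{{
				Op:  "EQUALS",
				Val: "X-Internal-Token", // header name is a placeholder
			}},
		}},
	}
}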
If specified, determines the time (in seconds) the @@ -44662,6 +46388,13 @@ type SecurityPolicyRuleRateLimitOptions struct { // "XFF_IP" EnforceOnKey string `json:"enforceOnKey,omitempty"` + // EnforceOnKeyConfigs: If specified, any combination of values of + // enforce_on_key_type/enforce_on_key_name is treated as the key on + // which ratelimit threshold/action is enforced. You can specify up to 3 + // enforce_on_key_configs. If enforce_on_key_configs is specified, + // enforce_on_key must not be specified. + EnforceOnKeyConfigs []*SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig `json:"enforceOnKeyConfigs,omitempty"` + // EnforceOnKeyName: Rate limit key name applicable only for the // following key types: HTTP_HEADER -- Name of the HTTP header whose // value is taken as the key value. HTTP_COOKIE -- Name of the HTTP @@ -44671,14 +46404,16 @@ type SecurityPolicyRuleRateLimitOptions struct { // ExceedAction: Action to take for requests that are above the // configured rate limit threshold, to either deny with a specified HTTP // response code, or redirect to a different endpoint. Valid options are - // "deny(status)", where valid values for status are 403, 404, 429, and - // 502, and "redirect" where the redirect parameters come from - // exceedRedirectOptions below. + // `deny(STATUS)`, where valid values for `STATUS` are 403, 404, 429, + // and 502, and `redirect`, where the redirect parameters come from + // `exceedRedirectOptions` below. The `redirect` action is only + // supported in Global Security Policies of type CLOUD_ARMOR. ExceedAction string `json:"exceedAction,omitempty"` // ExceedRedirectOptions: Parameters defining the redirect action that // is used as the exceed action. Cannot be specified if the exceed - // action is not redirect. + // action is not redirect. This field is only supported in Global + // Security Policies of type CLOUD_ARMOR. ExceedRedirectOptions *SecurityPolicyRuleRedirectOptions `json:"exceedRedirectOptions,omitempty"` // RateLimitThreshold: Threshold at which to begin ratelimiting. @@ -44708,6 +46443,71 @@ func (s *SecurityPolicyRuleRateLimitOptions) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +type SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig struct { + // EnforceOnKeyName: Rate limit key name applicable only for the + // following key types: HTTP_HEADER -- Name of the HTTP header whose + // value is taken as the key value. HTTP_COOKIE -- Name of the HTTP + // cookie whose value is taken as the key value. + EnforceOnKeyName string `json:"enforceOnKeyName,omitempty"` + + // EnforceOnKeyType: Determines the key to enforce the + // rate_limit_threshold on. Possible values are: - ALL: A single rate + // limit threshold is applied to all the requests matching this rule. + // This is the default value if "enforceOnKeyConfigs" is not configured. + // - IP: The source IP address of the request is the key. Each IP has + // this limit enforced separately. - HTTP_HEADER: The value of the HTTP + // header whose name is configured under "enforceOnKeyName". The key + // value is truncated to the first 128 bytes of the header value. If no + // such header is present in the request, the key type defaults to ALL. + // - XFF_IP: The first IP address (i.e. the originating client IP + // address) specified in the list of IPs under X-Forwarded-For HTTP + // header. If no such header is present or the value is not a valid IP, + // the key defaults to the source IP address of the request i.e. 
key + // type IP. - HTTP_COOKIE: The value of the HTTP cookie whose name is + // configured under "enforceOnKeyName". The key value is truncated to + // the first 128 bytes of the cookie value. If no such cookie is present + // in the request, the key type defaults to ALL. - HTTP_PATH: The URL + // path of the HTTP request. The key value is truncated to the first 128 + // bytes. - SNI: Server name indication in the TLS session of the HTTPS + // request. The key value is truncated to the first 128 bytes. The key + // type defaults to ALL on a HTTP session. - REGION_CODE: The + // country/region from which the request originates. + // + // Possible values: + // "ALL" + // "HTTP_COOKIE" + // "HTTP_HEADER" + // "HTTP_PATH" + // "IP" + // "REGION_CODE" + // "SNI" + // "XFF_IP" + EnforceOnKeyType string `json:"enforceOnKeyType,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EnforceOnKeyName") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EnforceOnKeyName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig) MarshalJSON() ([]byte, error) { + type NoMethod SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + type SecurityPolicyRuleRateLimitOptionsThreshold struct { // Count: Number of HTTP(S) requests for calculating the threshold. Count int64 `json:"count,omitempty"` @@ -44781,7 +46581,7 @@ type SecuritySettings struct { // should authenticate with this service's backends. clientTlsPolicy // only applies to a global BackendService with the loadBalancingScheme // set to INTERNAL_SELF_MANAGED. If left blank, communications are not - // encrypted. Note: This field currently has no impact. + // encrypted. ClientTlsPolicy string `json:"clientTlsPolicy,omitempty"` // SubjectAltNames: Optional. A list of Subject Alternative Names (SANs) @@ -44796,8 +46596,7 @@ type SecuritySettings struct { // Public Key Infrastructure which provisions server identities. Only // applies to a global BackendService with loadBalancingScheme set to // INTERNAL_SELF_MANAGED. Only applies when BackendService has an - // attached clientTlsPolicy with clientCertificate (mTLS mode). Note: - // This field currently has no impact. + // attached clientTlsPolicy with clientCertificate (mTLS mode). SubjectAltNames []string `json:"subjectAltNames,omitempty"` // ForceSendFields is a list of field names (e.g. "ClientTlsPolicy") to @@ -44946,7 +46745,7 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { // attachment represents a service that a producer has exposed. 
It // encapsulates the load balancer which fronts the service runs and a // list of NAT IP ranges that the producers uses to represent the -// consumers connecting to the service. next tag = 20 +// consumers connecting to the service. type ServiceAttachment struct { // ConnectedEndpoints: [Output Only] An array of connections for all the // consumers connected to this service attachment. @@ -45031,6 +46830,18 @@ type ServiceAttachment struct { // the PSC service attachment. PscServiceAttachmentId *Uint128 `json:"pscServiceAttachmentId,omitempty"` + // ReconcileConnections: This flag determines whether a consumer + // accept/reject list change can reconcile the statuses of existing + // ACCEPTED or REJECTED PSC endpoints. - If false, connection policy + // update will only affect existing PENDING PSC endpoints. Existing + // ACCEPTED/REJECTED endpoints will remain untouched regardless how the + // connection policy is modified . - If true, update will affect both + // PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED + // PSC endpoint will be moved to REJECTED if its project is added to the + // reject list. For newly created service attachment, this boolean + // defaults to true. + ReconcileConnections bool `json:"reconcileConnections,omitempty"` + // Region: [Output Only] URL of the region where the service attachment // resides. This field applies only to the region resource. You must // specify this field as part of the HTTP request URL. It is not @@ -46382,8 +48193,8 @@ func (s *SourceDiskEncryptionKey) MarshalJSON() ([]byte, error) { // creating the instance template from a source instance. type SourceInstanceParams struct { // DiskConfigs: Attached disks configuration. If not provided, defaults - // are applied: For boot disk and any other R/W disks, new custom images - // will be created from each disk. For read-only disks, they will be + // are applied: For boot disk and any other R/W disks, the source images + // for each disk will be used. For read-only disks, they will be // attached in read-only mode. Local SSD disks will be created as blank // volumes. DiskConfigs []*DiskInstantiationConfig `json:"diskConfigs,omitempty"` @@ -48241,8 +50052,8 @@ type Subnetwork struct { // If this field is not explicitly set, it will not appear in get // listings. If not set the default behavior is determined by the org // policy, if there is no org policy specified, then it will default to - // disabled. This field isn't supported with the purpose field set to - // INTERNAL_HTTPS_LOAD_BALANCER. + // disabled. This field isn't supported if the subnet purpose field is + // set to REGIONAL_MANAGED_PROXY. EnableFlowLogs bool `json:"enableFlowLogs,omitempty"` // ExternalIpv6Prefix: The external IPv6 address range that is owned by @@ -48335,12 +50146,20 @@ type Subnetwork struct { PrivateIpv6GoogleAccess string `json:"privateIpv6GoogleAccess,omitempty"` // Purpose: The purpose of the resource. This field can be either - // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with - // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created - // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If - // unspecified, the purpose defaults to PRIVATE_RFC_1918. The - // enableFlowLogs field isn't supported with the purpose field set to - // INTERNAL_HTTPS_LOAD_BALANCER. + // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or + // INTERNAL_HTTPS_LOAD_BALANCER. 
PRIVATE is the default purpose for + // user-created subnets or subnets that are automatically created in + // auto mode networks. A subnet with purpose set to + // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved + // for regional Envoy-based load balancers. A subnet with purpose set to + // PRIVATE_SERVICE_CONNECT is used to publish services using Private + // Service Connect. A subnet with purpose set to + // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used + // only by regional internal HTTP(S) load balancers. Note that + // REGIONAL_MANAGED_PROXY is the preferred setting for all regional + // Envoy load balancers. If unspecified, the subnet purpose defaults to + // PRIVATE. The enableFlowLogs field isn't supported if the subnet + // purpose field is set to REGIONAL_MANAGED_PROXY. // // Possible values: // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal @@ -48359,9 +50178,9 @@ type Subnetwork struct { Region string `json:"region,omitempty"` // Role: The role of subnetwork. Currently, this field is only used when - // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to - // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being - // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one + // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or + // BACKUP. An ACTIVE subnetwork is one that is currently being used for + // Envoy-based load balancers in a region. A BACKUP subnetwork is one // that is ready to be promoted to ACTIVE or is currently draining. This // field can be updated with a patch request. // @@ -48836,6 +50655,8 @@ type SubnetworkLogConfig struct { // field is not explicitly set, it will not appear in get listings. If // not set the default behavior is determined by the org policy, if // there is no org policy specified, then it will default to disabled. + // Flow logging isn't supported if the subnet purpose field is set to + // REGIONAL_MANAGED_PROXY. Enable bool `json:"enable,omitempty"` // FilterExpr: Can only be specified if VPC flow logs for this @@ -50300,7 +52121,9 @@ func (s *TargetHttpsProxiesScopedListWarningData) MarshalJSON() ([]byte, error) type TargetHttpsProxiesSetCertificateMapRequest struct { // CertificateMap: URL of the Certificate Map to associate with this - // TargetHttpsProxy. + // TargetHttpsProxy. Accepted format is + // //certificatemanager.googleapis.com/projects/{project + // }/locations/{location}/certificateMaps/{resourceName}. CertificateMap string `json:"certificateMap,omitempty"` // ForceSendFields is a list of field names (e.g. "CertificateMap") to @@ -50417,7 +52240,9 @@ type TargetHttpsProxy struct { // CertificateMap: URL of a certificate map that identifies a // certificate map associated with the given target proxy. This field // can only be set for global target proxies. If set, sslCertificates - // will be ignored. + // will be ignored. Accepted format is + // //certificatemanager.googleapis.com/projects/{project + // }/locations/{location}/certificateMaps/{resourceName}. CertificateMap string `json:"certificateMap,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -50495,9 +52320,11 @@ type TargetHttpsProxy struct { // networksecurity.ServerTlsPolicy resource that describes how the proxy // should authenticate inbound traffic. serverTlsPolicy only applies to // a global TargetHttpsProxy attached to globalForwardingRules with the - // loadBalancingScheme set to INTERNAL_SELF_MANAGED. 
If left blank, - // communications are not encrypted. Note: This field currently has no - // impact. + // loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL or + // EXTERNAL_MANAGED. For details which ServerTlsPolicy resources are + // accepted with INTERNAL_SELF_MANAGED and which with EXTERNAL, + // EXTERNAL_MANAGED loadBalancingScheme consult ServerTlsPolicy + // documentation. If left blank, communications are not encrypted. ServerTlsPolicy string `json:"serverTlsPolicy,omitempty"` // SslCertificates: URLs to SslCertificate resources that are used to @@ -52483,7 +54310,9 @@ func (s *TargetSslProxiesSetBackendServiceRequest) MarshalJSON() ([]byte, error) type TargetSslProxiesSetCertificateMapRequest struct { // CertificateMap: URL of the Certificate Map to associate with this - // TargetSslProxy. + // TargetSslProxy. Accepted format is + // //certificatemanager.googleapis.com/projects/{project + // }/locations/{location}/certificateMaps/{resourceName}. CertificateMap string `json:"certificateMap,omitempty"` // ForceSendFields is a list of field names (e.g. "CertificateMap") to @@ -52581,7 +54410,9 @@ type TargetSslProxy struct { // CertificateMap: URL of a certificate map that identifies a // certificate map associated with the given target proxy. This field // can only be set for global target proxies. If set, sslCertificates - // will be ignored. + // will be ignored. Accepted format is + // //certificatemanager.googleapis.com/projects/{project + // }/locations/{location}/certificateMaps/{resourceName}. CertificateMap string `json:"certificateMap,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -53576,6 +55407,21 @@ type TargetVpnGateway struct { // for target VPN gateways. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // TargetVpnGateway, which is essentially a hash of the labels set used + // for optimistic locking. The fingerprint is initially generated by + // Compute Engine and changes after every request to modify or update + // labels. You must always provide an up-to-date fingerprint hash in + // order to update or change labels, otherwise the request will fail + // with error 412 conditionNotMet. To see the latest fingerprint, make a + // get() request to retrieve a TargetVpnGateway. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // Name: Name of the resource. Provided by the client when the resource // is created. The name must be 1-63 characters long, and comply with // RFC1035. Specifically, the name must be 1-63 characters long and @@ -55288,6 +57134,22 @@ type UrlRewrite struct { // characters. PathPrefixRewrite string `json:"pathPrefixRewrite,omitempty"` + // PathTemplateRewrite: If specified, the pattern rewrites the URL path + // (based on the :path header) using the HTTP template syntax. A + // corresponding path_template_match must be specified. Any template + // variables must exist in the path_template_match field. - -At least + // one variable must be specified in the path_template_match field - You + // can omit variables from the rewritten URL - The * and ** operators + // cannot be matched unless they have a corresponding variable name - + // e.g. {format=*} or {var=**}. 
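A call sketch for the certificate-map resource format documented above, assuming svc is an initialized *compute.Service and using the generated TargetHttpsProxies.SetCertificateMap method with the request type shown earlier (all identifiers are placeholders):

func attachCertificateMap(svc *compute.Service) error {
	_, err := svc.TargetHttpsProxies.SetCertificateMap("my-project", "my-https-proxy",
		&compute.TargetHttpsProxiesSetCertificateMapRequest{
			CertificateMap: "//certificatemanager.googleapis.com/projects/my-project/locations/global/certificateMaps/my-map",
		}).Do()
	return err
}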
For example, a path_template_match of + // /static/{format=**} could be rewritten as /static/content/{format} to + // prefix /content to the URL. Variables can also be re-ordered in a + // rewrite, so that /{country}/{format}/{suffix=**} can be rewritten as + // /content/{format}/{country}/{suffix}. At least one non-empty + // routeRules[].matchRules[].path_template_match is required. Only one + // of path_prefix_rewrite or path_template_rewrite may be specified. + PathTemplateRewrite string `json:"pathTemplateRewrite,omitempty"` + // ForceSendFields is a list of field names (e.g. "HostRewrite") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -55341,12 +57203,20 @@ type UsableSubnetwork struct { Network string `json:"network,omitempty"` // Purpose: The purpose of the resource. This field can be either - // PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with - // purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created - // subnetwork that is reserved for Internal HTTP(S) Load Balancing. If - // unspecified, the purpose defaults to PRIVATE_RFC_1918. The - // enableFlowLogs field isn't supported with the purpose field set to - // INTERNAL_HTTPS_LOAD_BALANCER. + // PRIVATE, REGIONAL_MANAGED_PROXY, PRIVATE_SERVICE_CONNECT, or + // INTERNAL_HTTPS_LOAD_BALANCER. PRIVATE is the default purpose for + // user-created subnets or subnets that are automatically created in + // auto mode networks. A subnet with purpose set to + // REGIONAL_MANAGED_PROXY is a user-created subnetwork that is reserved + // for regional Envoy-based load balancers. A subnet with purpose set to + // PRIVATE_SERVICE_CONNECT is used to publish services using Private + // Service Connect. A subnet with purpose set to + // INTERNAL_HTTPS_LOAD_BALANCER is a proxy-only subnet that can be used + // only by regional internal HTTP(S) load balancers. Note that + // REGIONAL_MANAGED_PROXY is the preferred setting for all regional + // Envoy load balancers. If unspecified, the subnet purpose defaults to + // PRIVATE. The enableFlowLogs field isn't supported if the subnet + // purpose field is set to REGIONAL_MANAGED_PROXY. // // Possible values: // "INTERNAL_HTTPS_LOAD_BALANCER" - Subnet reserved for Internal @@ -55361,9 +57231,9 @@ type UsableSubnetwork struct { Purpose string `json:"purpose,omitempty"` // Role: The role of subnetwork. Currently, this field is only used when - // purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to - // ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently being - // used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork is one + // purpose = REGIONAL_MANAGED_PROXY. The value can be set to ACTIVE or + // BACKUP. An ACTIVE subnetwork is one that is currently being used for + // Envoy-based load balancers in a region. A BACKUP subnetwork is one // that is ready to be promoted to ACTIVE or is currently draining. This // field can be updated with a patch request. // @@ -56054,7 +57924,7 @@ type VpnGateway struct { // You must always provide an up-to-date fingerprint hash in order to // update or change labels, otherwise the request will fail with error // 412 conditionNotMet. To see the latest fingerprint, make a get() - // request to retrieve an VpnGateway. + // request to retrieve a VpnGateway. LabelFingerprint string `json:"labelFingerprint,omitempty"` // Labels: Labels for this resource. 
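A sketch of the rewrite pair described above: the match rule supplies the {format} variable and the rewrite re-emits it under a /static/content/ prefix (the backend service URL and priority are placeholders):

func exampleTemplateRewrite() *compute.HttpRouteRule {
	return &compute.HttpRouteRule{
		Priority: 1,
		MatchRules: []*compute.HttpRouteRuleMatch{{
			PathTemplateMatch: "/static/{format=**}", // defines the {format} variable
		}},
		RouteAction: &compute.HttpRouteAction{
			UrlRewrite: &compute.UrlRewrite{
				PathTemplateRewrite: "/static/content/{format}",
			},
		},
		Service: "projects/my-project/global/backendServices/my-backend",
	}
}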
These can only be added or modified @@ -56592,7 +58462,7 @@ type VpnGatewayStatusTunnel struct { // PeerGatewayInterface: The peer gateway interface this VPN tunnel is // connected to, the peer gateway could either be an external VPN - // gateway or GCP VPN gateway. + // gateway or a Google Cloud VPN gateway. PeerGatewayInterface int64 `json:"peerGatewayInterface,omitempty"` // TunnelUrl: URL reference to the VPN tunnel. @@ -56625,8 +58495,8 @@ func (s *VpnGatewayStatusTunnel) MarshalJSON() ([]byte, error) { // VpnGatewayStatusVpnConnection: A VPN connection contains all VPN // tunnels connected from this VpnGateway to the same peer gateway. The -// peer gateway could either be a external VPN gateway or GCP VPN -// gateway. +// peer gateway could either be an external VPN gateway or a Google +// Cloud VPN gateway. type VpnGatewayStatusVpnConnection struct { // PeerExternalGateway: URL reference to the peer external VPN gateways // to which the VPN tunnels in this VPN connection are connected. This @@ -56943,6 +58813,21 @@ type VpnTunnel struct { // VPN tunnels. Kind string `json:"kind,omitempty"` + // LabelFingerprint: A fingerprint for the labels being applied to this + // VpnTunnel, which is essentially a hash of the labels set used for + // optimistic locking. The fingerprint is initially generated by Compute + // Engine and changes after every request to modify or update labels. + // You must always provide an up-to-date fingerprint hash in order to + // update or change labels, otherwise the request will fail with error + // 412 conditionNotMet. To see the latest fingerprint, make a get() + // request to retrieve a VpnTunnel. + LabelFingerprint string `json:"labelFingerprint,omitempty"` + + // Labels: Labels for this resource. These can only be added or modified + // by the setLabels method. Each label key/value pair must comply with + // RFC1035. Label values may be empty. + Labels map[string]string `json:"labels,omitempty"` + // LocalTrafficSelector: Local traffic selector to use when establishing // the VPN tunnel with the peer VPN gateway. The value should be a CIDR // formatted string, for example: 192.168.0.0/16. The ranges must be @@ -57691,6 +59576,11 @@ type WafExpressionSetExpression struct { // positive. required Id string `json:"id,omitempty"` + // Sensitivity: The sensitivity value associated with the WAF rule ID. + // This corresponds to the ModSecurity paranoia level, ranging from 1 to + // 4. 0 is reserved for opt-in only rules. + Sensitivity int64 `json:"sensitivity,omitempty"` + // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty or default values are omitted from API requests. However, any @@ -60209,6 +62099,194 @@ func (c *AddressesListCall) Pages(ctx context.Context, f func(*AddressList) erro } } +// method id "compute.addresses.move": + +type AddressesMoveCall struct { + s *Service + project string + region string + address string + regionaddressesmoverequest *RegionAddressesMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Move: Moves the specified address resource. +// +// - address: Name of the address resource to move. +// - project: Source project ID which the Address is moved from. +// - region: Name of the region for this request. 
+func (r *AddressesService) Move(project string, region string, address string, regionaddressesmoverequest *RegionAddressesMoveRequest) *AddressesMoveCall { + c := &AddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.address = address + c.regionaddressesmoverequest = regionaddressesmoverequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *AddressesMoveCall) RequestId(requestId string) *AddressesMoveCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *AddressesMoveCall) Fields(s ...googleapi.Field) *AddressesMoveCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *AddressesMoveCall) Context(ctx context.Context) *AddressesMoveCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *AddressesMoveCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *AddressesMoveCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regionaddressesmoverequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/addresses/{address}/move") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "address": c.address, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.addresses.move" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *AddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Moves the specified address resource.", + // "flatPath": "projects/{project}/regions/{region}/addresses/{address}/move", + // "httpMethod": "POST", + // "id": "compute.addresses.move", + // "parameterOrder": [ + // "project", + // "region", + // "address" + // ], + // "parameters": { + // "address": { + // "description": "Name of the address resource to move.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Source project ID which the Address is moved from.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/addresses/{address}/move", + // "request": { + // "$ref": "RegionAddressesMoveRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.addresses.setLabels": type AddressesSetLabelsCall struct { @@ -60886,8 +62964,7 @@ type AutoscalersGetCall struct { header_ http.Header } -// Get: Returns the specified autoscaler resource. Gets a list of -// available autoscalers by making a list() request. +// Get: Returns the specified autoscaler resource. // // - autoscaler: Name of the autoscaler to return. // - project: Project ID for this request. 
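An end-to-end sketch of the Move call generated above; all identifiers are placeholders, and github.com/google/uuid is an assumed helper for minting the request ID. Reusing one UUID across retries makes the mutation idempotent: the server ignores a retry whose original request already completed.

package main

import (
	"context"
	"log"

	"github.com/google/uuid" // assumed dependency for the request ID
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	svc, err := compute.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	op, err := svc.Addresses.Move("project-a", "us-central1", "my-addr",
		&compute.RegionAddressesMoveRequest{
			DestinationAddress: "projects/project-b/regions/us-central1/addresses/my-addr",
		}).
		RequestId(uuid.NewString()). // keep the same ID if you retry
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("operation %s: %s", op.Name, op.Status)
}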
@@ -61001,7 +63078,7 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro } return ret, nil // { - // "description": "Returns the specified autoscaler resource. Gets a list of available autoscalers by making a list() request.", + // "description": "Returns the specified autoscaler resource.", // "flatPath": "projects/{project}/zones/{zone}/autoscalers/{autoscaler}", // "httpMethod": "GET", // "id": "compute.autoscalers.get", @@ -62427,8 +64504,7 @@ type BackendBucketsGetCall struct { header_ http.Header } -// Get: Returns the specified BackendBucket resource. Gets a list of -// available backend buckets by making a list() request. +// Get: Returns the specified BackendBucket resource. // // - backendBucket: Name of the BackendBucket resource to return. // - project: Project ID for this request. @@ -62539,7 +64615,7 @@ func (c *BackendBucketsGetCall) Do(opts ...googleapi.CallOption) (*BackendBucket } return ret, nil // { - // "description": "Returns the specified BackendBucket resource. Gets a list of available backend buckets by making a list() request.", + // "description": "Returns the specified BackendBucket resource.", // "flatPath": "projects/{project}/global/backendBuckets/{backendBucket}", // "httpMethod": "GET", // "id": "compute.backendBuckets.get", @@ -64382,8 +66458,7 @@ type BackendServicesGetCall struct { header_ http.Header } -// Get: Returns the specified BackendService resource. Gets a list of -// available backend services. +// Get: Returns the specified BackendService resource. // // - backendService: Name of the BackendService resource to return. // - project: Project ID for this request. @@ -64494,7 +66569,7 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi } return ret, nil // { - // "description": "Returns the specified BackendService resource. Gets a list of available backend services.", + // "description": "Returns the specified BackendService resource.", // "flatPath": "projects/{project}/global/backendServices/{backendService}", // "httpMethod": "GET", // "id": "compute.backendServices.get", @@ -66488,8 +68563,7 @@ type DiskTypesGetCall struct { header_ http.Header } -// Get: Returns the specified disk type. Gets a list of available disk -// types by making a list() request. +// Get: Returns the specified disk type. // // - diskType: Name of the disk type to return. // - project: Project ID for this request. @@ -66603,7 +68677,7 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { } return ret, nil // { - // "description": "Returns the specified disk type. Gets a list of available disk types by making a list() request.", + // "description": "Returns the specified disk type.", // "flatPath": "projects/{project}/zones/{zone}/diskTypes/{diskType}", // "httpMethod": "GET", // "id": "compute.diskTypes.get", @@ -67824,8 +69898,7 @@ type DisksGetCall struct { header_ http.Header } -// Get: Returns a specified persistent disk. Gets a list of available -// persistent disks by making a list() request. +// Get: Returns the specified persistent disk. // // - disk: Name of the persistent disk to return. // - project: Project ID for this request. @@ -67939,7 +70012,7 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { } return ret, nil // { - // "description": "Returns a specified persistent disk. 
Gets a list of available persistent disks by making a list() request.", + // "description": "Returns the specified persistent disk.", // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", // "httpMethod": "GET", // "id": "compute.disks.get", @@ -69558,6 +71631,221 @@ func (c *DisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestPer } +// method id "compute.disks.update": + +type DisksUpdateCall struct { + s *Service + project string + zone string + disk string + disk2 *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates the specified disk with the data included in the +// request. The update is performed only on selected fields included as +// part of update-mask. Only the following fields can be modified: +// user_license. +// +// - disk: The disk name for this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. +func (r *DisksService) Update(project string, zone string, disk string, disk2 *Disk) *DisksUpdateCall { + c := &DisksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disk2 = disk2 + return c +} + +// Paths sets the optional parameter "paths": +func (c *DisksUpdateCall) Paths(paths ...string) *DisksUpdateCall { + c.urlParams_.SetMulti("paths", append([]string{}, paths...)) + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *DisksUpdateCall) RequestId(requestId string) *DisksUpdateCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// UpdateMask sets the optional parameter "updateMask": update_mask +// indicates fields to be updated as part of this request. +func (c *DisksUpdateCall) UpdateMask(updateMask string) *DisksUpdateCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksUpdateCall) Fields(s ...googleapi.Field) *DisksUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksUpdateCall) Context(ctx context.Context) *DisksUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
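Before the remaining plumbing of DisksUpdateCall below, a hedged sketch of driving the new compute.disks.update method, reusing ctx and svc from the first sketch. Per the description above only user_license is mutable through this method, and the exact field-mask string is an assumption not confirmed by this diff.

// Patch a zonal disk: only fields named in updateMask are applied.
upd := &compute.Disk{
	// Populate only what the mask names; everything else is ignored.
}
op, err := svc.Disks.Update("my-project", "us-central1-a", "disk-1", upd).
	UpdateMask("userLicenses"). // google-fieldmask syntax; name is a guess
	Context(ctx).
	Do()
if err != nil {
	log.Fatalf("disks.update: %v", err)
}
log.Printf("update operation: %s", op.Name)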
+func (c *DisksUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DisksUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/disks/{disk}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the specified disk with the data included in the request. The update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: user_license.", + // "flatPath": "projects/{project}/zones/{zone}/disks/{disk}", + // "httpMethod": "PATCH", + // "id": "compute.disks.update", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The disk name for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "paths": { + // "location": "query", + // "repeated": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. 
Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "updateMask": { + // "description": "update_mask indicates fields to be updated as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/disks/{disk}", + // "request": { + // "$ref": "Disk" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.externalVpnGateways.delete": type ExternalVpnGatewaysDeleteCall struct { @@ -72193,7 +74481,9 @@ func (c *FirewallPoliciesListCall) PageToken(pageToken string) *FirewallPolicies } // ParentId sets the optional parameter "parentId": Parent ID for this -// request. +// request. The ID can be either be "folders/[FOLDER_ID]" if the parent +// is a folder or "organizations/[ORGANIZATION_ID]" if the parent is an +// organization. func (c *FirewallPoliciesListCall) ParentId(parentId string) *FirewallPoliciesListCall { c.urlParams_.Set("parentId", parentId) return c @@ -72333,7 +74623,7 @@ func (c *FirewallPoliciesListCall) Do(opts ...googleapi.CallOption) (*FirewallPo // "type": "string" // }, // "parentId": { - // "description": "Parent ID for this request.", + // "description": "Parent ID for this request. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", // "location": "query", // "type": "string" // }, @@ -72544,7 +74834,9 @@ func (r *FirewallPoliciesService) Move(firewallPolicy string) *FirewallPoliciesM } // ParentId sets the optional parameter "parentId": The new parent of -// the firewall policy. +// the firewall policy. The ID can be either be "folders/[FOLDER_ID]" if +// the parent is a folder or "organizations/[ORGANIZATION_ID]" if the +// parent is an organization. func (c *FirewallPoliciesMoveCall) ParentId(parentId string) *FirewallPoliciesMoveCall { c.urlParams_.Set("parentId", parentId) return c @@ -72668,7 +74960,7 @@ func (c *FirewallPoliciesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, // "type": "string" // }, // "parentId": { - // "description": "The new parent of the firewall policy.", + // "description": "The new parent of the firewall policy. The ID can be either be \"folders/[FOLDER_ID]\" if the parent is a folder or \"organizations/[ORGANIZATION_ID]\" if the parent is an organization.", // "location": "query", // "type": "string" // }, @@ -76647,8 +78939,7 @@ type GlobalAddressesGetCall struct { header_ http.Header } -// Get: Returns the specified address resource. 
Gets a list of available -// addresses by making a list() request. +// Get: Returns the specified address resource. // // - address: Name of the address resource to return. // - project: Project ID for this request. @@ -76759,7 +79050,7 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err } return ret, nil // { - // "description": "Returns the specified address resource. Gets a list of available addresses by making a list() request.", + // "description": "Returns the specified address resource.", // "flatPath": "projects/{project}/global/addresses/{address}", // "httpMethod": "GET", // "id": "compute.globalAddresses.get", @@ -77240,35 +79531,51 @@ func (c *GlobalAddressesListCall) Pages(ctx context.Context, f func(*AddressList } } -// method id "compute.globalAddresses.setLabels": +// method id "compute.globalAddresses.move": -type GlobalAddressesSetLabelsCall struct { - s *Service - project string - resource string - globalsetlabelsrequest *GlobalSetLabelsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesMoveCall struct { + s *Service + project string + address string + globaladdressesmoverequest *GlobalAddressesMoveRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetLabels: Sets the labels on a GlobalAddress. To learn more about -// labels, read the Labeling Resources documentation. +// Move: Moves the specified address resource from one project to +// another project. // -// - project: Project ID for this request. -// - resource: Name or id of the resource for this request. -func (r *GlobalAddressesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalAddressesSetLabelsCall { - c := &GlobalAddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - address: Name of the address resource to move. +// - project: Source project ID which the Address is moved from. +func (r *GlobalAddressesService) Move(project string, address string, globaladdressesmoverequest *GlobalAddressesMoveRequest) *GlobalAddressesMoveCall { + c := &GlobalAddressesMoveCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.resource = resource - c.globalsetlabelsrequest = globalsetlabelsrequest + c.address = address + c.globaladdressesmoverequest = globaladdressesmoverequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *GlobalAddressesMoveCall) RequestId(requestId string) *GlobalAddressesMoveCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
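The requestId semantics documented above make retries idempotent. A sketch of the intended pattern, continuing with the svc/ctx assumptions from the first sketch; github.com/google/uuid is an assumed helper and the destination URL is a placeholder. Real code would inspect the error and back off before retrying.

// One UUID per logical move, reused verbatim on retry so the server
// can recognize and drop the duplicate request.
rid := uuid.NewString()
req := &compute.GlobalAddressesMoveRequest{
	DestinationAddress: "projects/dst-project/global/addresses/addr-1",
}
call := func() (*compute.Operation, error) {
	return svc.GlobalAddresses.Move("src-project", "addr-1", req).
		RequestId(rid).
		Context(ctx).
		Do()
}
op, err := call()
if err != nil {
	op, err = call() // safe: same rid, a duplicate is ignored server-side
}
if err != nil {
	log.Fatal(err)
}
log.Printf("move operation: %s", op.Name)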
-func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddressesSetLabelsCall { +func (c *GlobalAddressesMoveCall) Fields(s ...googleapi.Field) *GlobalAddressesMoveCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77276,21 +79583,21 @@ func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddre // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalAddressesSetLabelsCall) Context(ctx context.Context) *GlobalAddressesSetLabelsCall { +func (c *GlobalAddressesMoveCall) Context(ctx context.Context) *GlobalAddressesMoveCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *GlobalAddressesSetLabelsCall) Header() http.Header { +func (c *GlobalAddressesMoveCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesMoveCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -77298,14 +79605,14 @@ func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, er } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globaladdressesmoverequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{resource}/setLabels") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{address}/move") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -77313,20 +79620,20 @@ func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, er } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "resource": c.resource, + "project": c.project, + "address": c.address, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalAddresses.setLabels" call. +// Do executes the "compute.globalAddresses.move" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesMoveCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -77357,33 +79664,38 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } return ret, nil // { - // "description": "Sets the labels on a GlobalAddress. 
To learn more about labels, read the Labeling Resources documentation.", - // "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", + // "description": "Moves the specified address resource from one project to another project.", + // "flatPath": "projects/{project}/global/addresses/{address}/move", // "httpMethod": "POST", - // "id": "compute.globalAddresses.setLabels", + // "id": "compute.globalAddresses.move", // "parameterOrder": [ // "project", - // "resource" + // "address" // ], // "parameters": { - // "project": { - // "description": "Project ID for this request.", + // "address": { + // "description": "Name of the address resource to move.", // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, - // "resource": { - // "description": "Name or id of the resource for this request.", + // "project": { + // "description": "Source project ID which the Address is moved from.", // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/global/addresses/{resource}/setLabels", + // "path": "projects/{project}/global/addresses/{address}/move", // "request": { - // "$ref": "GlobalSetLabelsRequest" + // "$ref": "GlobalAddressesMoveRequest" // }, // "response": { // "$ref": "Operation" @@ -77396,48 +79708,35 @@ func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operat } -// method id "compute.globalForwardingRules.delete": +// method id "compute.globalAddresses.setLabels": -type GlobalForwardingRulesDeleteCall struct { - s *Service - project string - forwardingRule string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type GlobalAddressesSetLabelsCall struct { + s *Service + project string + resource string + globalsetlabelsrequest *GlobalSetLabelsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified GlobalForwardingRule resource. +// SetLabels: Sets the labels on a GlobalAddress. To learn more about +// labels, read the Labeling Resources documentation. // -// - forwardingRule: Name of the ForwardingRule resource to delete. // - project: Project ID for this request. 
-func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { - c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - resource: Name or id of the resource for this request. +func (r *GlobalAddressesService) SetLabels(project string, resource string, globalsetlabelsrequest *GlobalSetLabelsRequest) *GlobalAddressesSetLabelsCall { + c := &GlobalAddressesSetLabelsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project - c.forwardingRule = forwardingRule - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { - c.urlParams_.Set("requestId", requestId) + c.resource = resource + c.globalsetlabelsrequest = globalsetlabelsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { +func (c *GlobalAddressesSetLabelsCall) Fields(s ...googleapi.Field) *GlobalAddressesSetLabelsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -77445,21 +79744,21 @@ func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalFo // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { +func (c *GlobalAddressesSetLabelsCall) Context(ctx context.Context) *GlobalAddressesSetLabelsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { +func (c *GlobalAddressesSetLabelsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *GlobalAddressesSetLabelsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -77467,30 +79766,199 @@ func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.globalsetlabelsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{forwardingRule}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/addresses/{resource}/setLabels") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "forwardingRule": c.forwardingRule, + "project": c.project, + "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.globalForwardingRules.delete" call. +// Do executes the "compute.globalAddresses.setLabels" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *GlobalAddressesSetLabelsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the labels on a GlobalAddress. 
To learn more about labels, read the Labeling Resources documentation.", + // "flatPath": "projects/{project}/global/addresses/{resource}/setLabels", + // "httpMethod": "POST", + // "id": "compute.globalAddresses.setLabels", + // "parameterOrder": [ + // "project", + // "resource" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "resource": { + // "description": "Name or id of the resource for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9_]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/addresses/{resource}/setLabels", + // "request": { + // "$ref": "GlobalSetLabelsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.globalForwardingRules.delete": + +type GlobalForwardingRulesDeleteCall struct { + s *Service + project string + forwardingRule string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified GlobalForwardingRule resource. +// +// - forwardingRule: Name of the ForwardingRule resource to delete. +// - project: Project ID for this request. +func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall { + c := &GlobalForwardingRulesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.forwardingRule = forwardingRule + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *GlobalForwardingRulesDeleteCall) RequestId(requestId string) *GlobalForwardingRulesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalForwardingRulesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
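Stepping back from the relocation churn for a moment: the compute.globalAddresses.setLabels method re-added above replaces the full label map and is fingerprint-guarded, so the usual pattern is a read-modify-write. A sketch under the same svc/ctx assumptions, with placeholder names throughout:

addr, err := svc.GlobalAddresses.Get("my-project", "addr-1").Context(ctx).Do()
if err != nil {
	log.Fatal(err)
}
labels := addr.Labels
if labels == nil {
	labels = map[string]string{}
}
labels["team"] = "networking"
// A stale fingerprint is rejected, which turns a concurrent label edit
// into a retryable conflict instead of a lost update.
_, err = svc.GlobalAddresses.SetLabels("my-project", "addr-1", &compute.GlobalSetLabelsRequest{
	LabelFingerprint: addr.LabelFingerprint,
	Labels:           labels,
}).Context(ctx).Do()
if err != nil {
	log.Fatal(err)
}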
+func (c *GlobalForwardingRulesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/forwardingRules/{forwardingRule}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "forwardingRule": c.forwardingRule, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.globalForwardingRules.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -79215,8 +81683,7 @@ type GlobalNetworkEndpointGroupsGetCall struct { header_ http.Header } -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. +// Get: Returns the specified network endpoint group. // // - networkEndpointGroup: The name of the network endpoint group. It // should comply with RFC1035. @@ -79328,7 +81795,7 @@ func (c *GlobalNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "description": "Returns the specified network endpoint group.", // "flatPath": "projects/{project}/global/networkEndpointGroups/{networkEndpointGroup}", // "httpMethod": "GET", // "id": "compute.globalNetworkEndpointGroups.get", @@ -83081,8 +85548,7 @@ type HealthChecksGetCall struct { header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. +// Get: Returns the specified HealthCheck resource. // // - healthCheck: Name of the HealthCheck resource to return. // - project: Project ID for this request. @@ -83193,7 +85659,7 @@ func (c *HealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthCheck, er } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + // "description": "Returns the specified HealthCheck resource.", // "flatPath": "projects/{project}/global/healthChecks/{healthCheck}", // "httpMethod": "GET", // "id": "compute.healthChecks.get", @@ -84208,8 +86674,7 @@ type HttpHealthChecksGetCall struct { header_ http.Header } -// Get: Returns the specified HttpHealthCheck resource. 
Gets a list of -// available HTTP health checks by making a list() request. +// Get: Returns the specified HttpHealthCheck resource. // // - httpHealthCheck: Name of the HttpHealthCheck resource to return. // - project: Project ID for this request. @@ -84320,7 +86785,7 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC } return ret, nil // { - // "description": "Returns the specified HttpHealthCheck resource. Gets a list of available HTTP health checks by making a list() request.", + // "description": "Returns the specified HttpHealthCheck resource.", // "flatPath": "projects/{project}/global/httpHealthChecks/{httpHealthCheck}", // "httpMethod": "GET", // "id": "compute.httpHealthChecks.get", @@ -85335,8 +87800,7 @@ type HttpsHealthChecksGetCall struct { header_ http.Header } -// Get: Returns the specified HttpsHealthCheck resource. Gets a list of -// available HTTPS health checks by making a list() request. +// Get: Returns the specified HttpsHealthCheck resource. // // - httpsHealthCheck: Name of the HttpsHealthCheck resource to return. // - project: Project ID for this request. @@ -85447,7 +87911,7 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt } return ret, nil // { - // "description": "Returns the specified HttpsHealthCheck resource. Gets a list of available HTTPS health checks by making a list() request.", + // "description": "Returns the specified HttpsHealthCheck resource.", // "flatPath": "projects/{project}/global/httpsHealthChecks/{httpsHealthCheck}", // "httpMethod": "GET", // "id": "compute.httpsHealthChecks.get", @@ -86812,8 +89276,7 @@ type ImagesGetCall struct { header_ http.Header } -// Get: Returns the specified image. Gets a list of available images by -// making a list() request. +// Get: Returns the specified image. // // - image: Name of the image resource to return. // - project: Project ID for this request. @@ -86924,7 +89387,7 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { } return ret, nil // { - // "description": "Returns the specified image. Gets a list of available images by making a list() request.", + // "description": "Returns the specified image.", // "flatPath": "projects/{project}/global/images/{image}", // "httpMethod": "GET", // "id": "compute.images.get", @@ -86974,10 +89437,12 @@ type ImagesGetFromFamilyCall struct { } // GetFromFamily: Returns the latest image that is part of an image -// family and is not deprecated. +// family and is not deprecated. For more information on image families, +// see Public image families documentation. // -// - family: Name of the image family to search for. -// - project: Project ID for this request. +// - family: Name of the image family to search for. +// - project: The image project that the image belongs to. For example, +// to get a CentOS image, specify centos-cloud as the image project. func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -87085,7 +89550,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro } return ret, nil // { - // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "description": "Returns the latest image that is part of an image family and is not deprecated. 
For more information on image families, see Public image families documentation.", // "flatPath": "projects/{project}/global/images/family/{family}", // "httpMethod": "GET", // "id": "compute.images.getFromFamily", @@ -87102,7 +89567,7 @@ func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, erro // "type": "string" // }, // "project": { - // "description": "Project ID for this request.", + // "description": "The image project that the image belongs to. For example, to get a CentOS image, specify centos-cloud as the image project.", // "location": "path", // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", // "required": true, @@ -89828,8 +92293,7 @@ type InstanceGroupManagersGetCall struct { } // Get: Returns all of the details about the specified managed instance -// group. Gets a list of available managed instance groups by making a -// list() request. +// group. // // - instanceGroupManager: The name of the managed instance group. // - project: Project ID for this request. @@ -89944,7 +92408,7 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan } return ret, nil // { - // "description": "Returns all of the details about the specified managed instance group. Gets a list of available managed instance groups by making a list() request.", + // "description": "Returns all of the details about the specified managed instance group.", // "flatPath": "projects/{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}", // "httpMethod": "GET", // "id": "compute.instanceGroupManagers.get", @@ -94716,6 +97180,304 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope } +// method id "compute.instanceTemplates.aggregatedList": + +type InstanceTemplatesAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// AggregatedList: Retrieves the list of all InstanceTemplates +// resources, regional and global, available to the specified project. +// +// - project: Name of the project scoping this request. +func (r *InstanceTemplatesService) AggregatedList(project string) *InstanceTemplatesAggregatedListCall { + c := &InstanceTemplatesAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. 
+// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InstanceTemplatesAggregatedListCall) Filter(filter string) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// IncludeAllScopes sets the optional parameter "includeAllScopes": +// Indicates whether every visible scope for each scope type (zone, +// region, global) should be included in the response. For new resource +// types added after this field, the flag has no effect as new resource +// types will always include every visible scope for each scope type in +// response. For resource types which predate this field, if this flag +// is omitted or false, only scopes of the scope types where the +// resource type is expected to be found will be included. +func (c *InstanceTemplatesAggregatedListCall) IncludeAllScopes(includeAllScopes bool) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("includeAllScopes", fmt.Sprint(includeAllScopes)) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InstanceTemplatesAggregatedListCall) MaxResults(maxResults int64) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. 
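The Filter option above and the OrderBy setter defined just below compose on the new aggregated list call. A sketch with the same svc/ctx assumptions and a placeholder project; the filter string follows the AIP-160 form described in the comment above:

call := svc.InstanceTemplates.AggregatedList("my-project").
	Filter("name != example-template"). // AIP-160 style, per the doc above
	OrderBy("creationTimestamp desc").
	MaxResults(100)
res, err := call.Context(ctx).Do()
if err != nil {
	log.Fatal(err)
}
// Items is keyed by scope name, covering regional and global templates.
log.Printf("scopes returned: %d", len(res.Items))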
+func (c *InstanceTemplatesAggregatedListCall) OrderBy(orderBy string) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InstanceTemplatesAggregatedListCall) PageToken(pageToken string) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InstanceTemplatesAggregatedListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstanceTemplatesAggregatedListCall) Fields(s ...googleapi.Field) *InstanceTemplatesAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *InstanceTemplatesAggregatedListCall) IfNoneMatch(entityTag string) *InstanceTemplatesAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstanceTemplatesAggregatedListCall) Context(ctx context.Context) *InstanceTemplatesAggregatedListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstanceTemplatesAggregatedListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstanceTemplatesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/aggregated/instanceTemplates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instanceTemplates.aggregatedList" call. +// Exactly one of *InstanceTemplateAggregatedList or error will be +// non-nil. Any non-2xx status code is an error. 
Response headers are in +// either *InstanceTemplateAggregatedList.ServerResponse.Header or (if a +// response was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InstanceTemplatesAggregatedListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &InstanceTemplateAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of all InstanceTemplates resources, regional and global, available to the specified project.", + // "flatPath": "projects/{project}/aggregated/instanceTemplates", + // "httpMethod": "GET", + // "id": "compute.instanceTemplates.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. 
Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "includeAllScopes": { + // "description": "Indicates whether every visible scope for each scope type (zone, region, global) should be included in the response. For new resource types added after this field, the flag has no effect as new resource types will always include every visible scope for each scope type in response. For resource types which predate this field, if this flag is omitted or false, only scopes of the scope types where the resource type is expected to be found will be included.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Name of the project scoping this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/aggregated/instanceTemplates", + // "response": { + // "$ref": "InstanceTemplateAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. 
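Typical usage of the Pages helper whose implementation follows, under the same assumptions as the earlier sketches; the scoped-list field name follows the generated naming convention and is an assumption:

err := svc.InstanceTemplates.AggregatedList("my-project").
	Pages(ctx, func(page *compute.InstanceTemplateAggregatedList) error {
		for scope, list := range page.Items {
			log.Printf("%s: %d templates", scope, len(list.InstanceTemplates))
		}
		return nil // returning an error stops the iteration
	})
if err != nil {
	log.Fatal(err)
}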
+func (c *InstanceTemplatesAggregatedListCall) Pages(ctx context.Context, f func(*InstanceTemplateAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.instanceTemplates.delete": type InstanceTemplatesDeleteCall struct { @@ -94896,8 +97658,7 @@ type InstanceTemplatesGetCall struct { header_ http.Header } -// Get: Returns the specified instance template. Gets a list of -// available instance templates by making a list() request. +// Get: Returns the specified instance template. // // - instanceTemplate: The name of the instance template. // - project: Project ID for this request. @@ -95008,7 +97769,7 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe } return ret, nil // { - // "description": "Returns the specified instance template. Gets a list of available instance templates by making a list() request.", + // "description": "Returns the specified instance template.", // "flatPath": "projects/{project}/global/instanceTemplates/{instanceTemplate}", // "httpMethod": "GET", // "id": "compute.instanceTemplates.get", @@ -97630,8 +100391,7 @@ type InstancesGetCall struct { header_ http.Header } -// Get: Returns the specified Instance resource. Gets a list of -// available instances by making a list() request. +// Get: Returns the specified Instance resource. // // - instance: Name of the instance resource to return. // - project: Project ID for this request. @@ -97745,7 +100505,7 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { } return ret, nil // { - // "description": "Returns the specified Instance resource. Gets a list of available instances by making a list() request.", + // "description": "Returns the specified Instance resource.", // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}", // "httpMethod": "GET", // "id": "compute.instances.get", @@ -101910,35 +104670,30 @@ func (c *InstancesSetMinCpuPlatformCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instances.setScheduling": +// method id "compute.instances.setName": -type InstancesSetSchedulingCall struct { - s *Service - project string - zone string - instance string - scheduling *Scheduling - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetNameCall struct { + s *Service + project string + zone string + instance string + instancessetnamerequest *InstancesSetNameRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetScheduling: Sets an instance's scheduling options. You can only -// call this method on a stopped instance, that is, a VM instance that -// is in a `TERMINATED` state. See Instance Life Cycle for more -// information on the possible instance states. For more information -// about setting scheduling options for a VM, see Set VM host -// maintenance policy. +// SetName: Sets name of an instance. // -// - instance: Instance name for this request. +// - instance: The instance name for this request. // - project: Project ID for this request. // - zone: The name of the zone for this request. 
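The setScheduling-to-setName churn in this hunk is largely ordering: the generated file sorts methods by method id, so inserting the new setName verb shifts the existing blocks. A usage sketch for setName itself, reusing svc and ctx from the sketch above; the CurrentName/Name fields follow the InstancesSetNameRequest schema and every identifier is a placeholder:

    // Rename is asynchronous: SetName returns a zonal Operation to poll.
    op, err := svc.Instances.SetName("my-project", "us-central1-a", "old-name",
        &compute.InstancesSetNameRequest{
            CurrentName: "old-name", // must match the live name, as a fencing check
            Name:        "new-name",
        }).Context(ctx).Do()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("operation %s is %s", op.Name, op.Status)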
-func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { - c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstancesService) SetName(project string, zone string, instance string, instancessetnamerequest *InstancesSetNameRequest) *InstancesSetNameCall { + c := &InstancesSetNameCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.scheduling = scheduling + c.instancessetnamerequest = instancessetnamerequest return c } @@ -101953,7 +104708,7 @@ func (r *InstancesService) SetScheduling(project string, zone string, instance s // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { +func (c *InstancesSetNameCall) RequestId(requestId string) *InstancesSetNameCall { c.urlParams_.Set("requestId", requestId) return c } @@ -101961,7 +104716,7 @@ func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSc // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { +func (c *InstancesSetNameCall) Fields(s ...googleapi.Field) *InstancesSetNameCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -101969,21 +104724,21 @@ func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetS // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { +func (c *InstancesSetNameCall) Context(ctx context.Context) *InstancesSetNameCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetSchedulingCall) Header() http.Header { +func (c *InstancesSetNameCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetNameCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -101991,14 +104746,14 @@ func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, erro } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetnamerequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setScheduling") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setName") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102013,14 +104768,14 @@ func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, erro return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setScheduling" call. +// Do executes the "compute.instances.setName" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetNameCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102051,10 +104806,10 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } return ret, nil // { - // "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. For more information about setting scheduling options for a VM, see Set VM host maintenance policy.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", + // "description": "Sets name of an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setName", // "httpMethod": "POST", - // "id": "compute.instances.setScheduling", + // "id": "compute.instances.setName", // "parameterOrder": [ // "project", // "zone", @@ -102062,7 +104817,7 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // ], // "parameters": { // "instance": { - // "description": "Instance name for this request.", + // "description": "The instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -102088,9 +104843,9 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setName", // "request": { - // "$ref": "Scheduling" + // "$ref": "InstancesSetNameRequest" // }, // "response": { // "$ref": "Operation" @@ -102103,32 +104858,35 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio } -// method id "compute.instances.setServiceAccount": +// method id "compute.instances.setScheduling": -type InstancesSetServiceAccountCall struct { - s *Service - project string - zone string - instance string - instancessetserviceaccountrequest *InstancesSetServiceAccountRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetSchedulingCall struct { + s *Service + project string + zone string + instance string + scheduling *Scheduling + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetServiceAccount: Sets the service account on the instance. For more -// information, read Changing the service account and access scopes for -// an instance. 
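Per the restored doc comment, setScheduling only succeeds against a TERMINATED instance. A sketch under the same assumed setup; note that the generated Scheduling type models AutomaticRestart as a *bool, so an explicit false still round-trips:

    // The target VM must already be stopped (TERMINATED).
    autoRestart := false
    op, err := svc.Instances.SetScheduling("my-project", "us-central1-a", "my-vm",
        &compute.Scheduling{
            AutomaticRestart:  &autoRestart, // pointer distinguishes explicit false from unset
            OnHostMaintenance: "TERMINATE",  // or "MIGRATE" for live migration
        }).Context(ctx).Do()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("scheduling update: %s", op.Status)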
+// SetScheduling: Sets an instance's scheduling options. You can only +// call this method on a stopped instance, that is, a VM instance that +// is in a `TERMINATED` state. See Instance Life Cycle for more +// information on the possible instance states. For more information +// about setting scheduling options for a VM, see Set VM host +// maintenance policy. // -// - instance: Name of the instance resource to start. +// - instance: Instance name for this request. // - project: Project ID for this request. // - zone: The name of the zone for this request. -func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { - c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall { + c := &InstancesSetSchedulingCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.instancessetserviceaccountrequest = instancessetserviceaccountrequest + c.scheduling = scheduling return c } @@ -102143,7 +104901,7 @@ func (r *InstancesService) SetServiceAccount(project string, zone string, instan // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { +func (c *InstancesSetSchedulingCall) RequestId(requestId string) *InstancesSetSchedulingCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102151,7 +104909,7 @@ func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesS // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { +func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102159,21 +104917,21 @@ func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *Instances // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { +func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetSchedulingCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetServiceAccountCall) Header() http.Header { +func (c *InstancesSetSchedulingCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -102181,14 +104939,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setScheduling") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102203,14 +104961,14 @@ func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setServiceAccount" call. +// Do executes the "compute.instances.setScheduling" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102241,10 +104999,10 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } return ret, nil // { - // "description": "Sets the service account on the instance. For more information, read Changing the service account and access scopes for an instance.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "description": "Sets an instance's scheduling options. You can only call this method on a stopped instance, that is, a VM instance that is in a `TERMINATED` state. See Instance Life Cycle for more information on the possible instance states. 
For more information about setting scheduling options for a VM, see Set VM host maintenance policy.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", // "httpMethod": "POST", - // "id": "compute.instances.setServiceAccount", + // "id": "compute.instances.setScheduling", // "parameterOrder": [ // "project", // "zone", @@ -102252,7 +105010,7 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // ], // "parameters": { // "instance": { - // "description": "Name of the instance resource to start.", + // "description": "Instance name for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -102278,9 +105036,9 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setScheduling", // "request": { - // "$ref": "InstancesSetServiceAccountRequest" + // "$ref": "Scheduling" // }, // "response": { // "$ref": "Operation" @@ -102293,33 +105051,32 @@ func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Oper } -// method id "compute.instances.setShieldedInstanceIntegrityPolicy": +// method id "compute.instances.setServiceAccount": -type InstancesSetShieldedInstanceIntegrityPolicyCall struct { - s *Service - project string - zone string - instance string - shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetServiceAccountCall struct { + s *Service + project string + zone string + instance string + instancessetserviceaccountrequest *InstancesSetServiceAccountRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance -// integrity policy for an instance. You can only use this method on a -// running instance. This method supports PATCH semantics and uses the -// JSON merge patch format and processing rules. +// SetServiceAccount: Sets the service account on the instance. For more +// information, read Changing the service account and access scopes for +// an instance. // -// - instance: Name or id of the instance scoping this request. +// - instance: Name of the instance resource to start. // - project: Project ID for this request. // - zone: The name of the zone for this request. 
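A sketch of the setServiceAccount call this hunk restores, again reusing svc and ctx; Email and Scopes follow the InstancesSetServiceAccountRequest schema, and the account address is a placeholder:

    // The instance must be stopped before its service account can change.
    op, err := svc.Instances.SetServiceAccount("my-project", "us-central1-a", "my-vm",
        &compute.InstancesSetServiceAccountRequest{
            Email:  "runtime@my-project.iam.gserviceaccount.com",
            Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
        }).Context(ctx).Do()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("service-account update: %s", op.Status)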
-func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { - c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstancesService) SetServiceAccount(project string, zone string, instance string, instancessetserviceaccountrequest *InstancesSetServiceAccountRequest) *InstancesSetServiceAccountCall { + c := &InstancesSetServiceAccountCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy + c.instancessetserviceaccountrequest = instancessetserviceaccountrequest return c } @@ -102334,7 +105091,7 @@ func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zo // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { +func (c *InstancesSetServiceAccountCall) RequestId(requestId string) *InstancesSetServiceAccountCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102342,7 +105099,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId st // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { +func (c *InstancesSetServiceAccountCall) Fields(s ...googleapi.Field) *InstancesSetServiceAccountCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102350,21 +105107,21 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi. // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { +func (c *InstancesSetServiceAccountCall) Context(ctx context.Context) *InstancesSetServiceAccountCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { +func (c *InstancesSetServiceAccountCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetServiceAccountCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -102372,16 +105129,16 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetserviceaccountrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("PATCH", urls, body) + req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } @@ -102394,14 +105151,14 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. +// Do executes the "compute.instances.setServiceAccount" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetServiceAccountCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102432,10 +105189,10 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C } return ret, nil // { - // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", - // "httpMethod": "PATCH", - // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", + // "description": "Sets the service account on the instance. 
For more information, read Changing the service account and access scopes for an instance.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", + // "httpMethod": "POST", + // "id": "compute.instances.setServiceAccount", // "parameterOrder": [ // "project", // "zone", @@ -102443,7 +105200,7 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C // ], // "parameters": { // "instance": { - // "description": "Name or id of the instance scoping this request.", + // "description": "Name of the instance resource to start.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -102469,9 +105226,9 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount", // "request": { - // "$ref": "ShieldedInstanceIntegrityPolicy" + // "$ref": "InstancesSetServiceAccountRequest" // }, // "response": { // "$ref": "Operation" @@ -102484,31 +105241,33 @@ func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.C } -// method id "compute.instances.setTags": +// method id "compute.instances.setShieldedInstanceIntegrityPolicy": -type InstancesSetTagsCall struct { - s *Service - project string - zone string - instance string - tags *Tags - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type InstancesSetShieldedInstanceIntegrityPolicyCall struct { + s *Service + project string + zone string + instance string + shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// SetTags: Sets network tags for the specified instance to the data -// included in the request. +// SetShieldedInstanceIntegrityPolicy: Sets the Shielded Instance +// integrity policy for an instance. You can only use this method on a +// running instance. This method supports PATCH semantics and uses the +// JSON merge patch format and processing rules. // -// - instance: Name of the instance scoping this request. +// - instance: Name or id of the instance scoping this request. // - project: Project ID for this request. // - zone: The name of the zone for this request. -func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { - c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstancesService) SetShieldedInstanceIntegrityPolicy(project string, zone string, instance string, shieldedinstanceintegritypolicy *ShieldedInstanceIntegrityPolicy) *InstancesSetShieldedInstanceIntegrityPolicyCall { + c := &InstancesSetShieldedInstanceIntegrityPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance - c.tags = tags + c.shieldedinstanceintegritypolicy = shieldedinstanceintegritypolicy return c } @@ -102523,7 +105282,7 @@ func (r *InstancesService) SetTags(project string, zone string, instance string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). 
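setShieldedInstanceIntegrityPolicy is the odd one out in this family: it issues a PATCH (JSON merge-patch semantics) rather than a POST, and targets a running instance. A sketch under the same setup, assuming UpdateAutoLearnPolicy is the field of interest on ShieldedInstanceIntegrityPolicy:

    // PATCH semantics: only the fields sent are merged into the policy.
    op, err := svc.Instances.SetShieldedInstanceIntegrityPolicy("my-project", "us-central1-a", "my-vm",
        &compute.ShieldedInstanceIntegrityPolicy{
            UpdateAutoLearnPolicy: true, // re-learn the integrity baseline
        }).Context(ctx).Do()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("integrity-policy update: %s", op.Status)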
-func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) RequestId(requestId string) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("requestId", requestId) return c } @@ -102531,7 +105290,7 @@ func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Fields(s ...googleapi.Field) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102539,21 +105298,21 @@ func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCal // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Context(ctx context.Context) *InstancesSetShieldedInstanceIntegrityPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *InstancesSetTagsCall) Header() http.Header { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -102561,16 +105320,16 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + body, err := googleapi.WithoutDataWrapper.JSONReader(c.shieldedinstanceintegritypolicy) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setTags") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } @@ -102583,14 +105342,14 @@ func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.setTags" call. +// Do executes the "compute.instances.setShieldedInstanceIntegrityPolicy" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetShieldedInstanceIntegrityPolicyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102621,10 +105380,10 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } return ret, nil // { - // "description": "Sets network tags for the specified instance to the data included in the request.", - // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", - // "httpMethod": "POST", - // "id": "compute.instances.setTags", + // "description": "Sets the Shielded Instance integrity policy for an instance. You can only use this method on a running instance. This method supports PATCH semantics and uses the JSON merge patch format and processing rules.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", + // "httpMethod": "PATCH", + // "id": "compute.instances.setShieldedInstanceIntegrityPolicy", // "parameterOrder": [ // "project", // "zone", @@ -102632,7 +105391,7 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // ], // "parameters": { // "instance": { - // "description": "Name of the instance scoping this request.", + // "description": "Name or id of the instance scoping this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, @@ -102658,9 +105417,9 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err // "type": "string" // } // }, - // "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy", // "request": { - // "$ref": "Tags" + // "$ref": "ShieldedInstanceIntegrityPolicy" // }, // "response": { // "$ref": "Operation" @@ -102673,36 +105432,54 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err } -// method id "compute.instances.simulateMaintenanceEvent": +// method id "compute.instances.setTags": -type InstancesSimulateMaintenanceEventCall struct { +type InstancesSetTagsCall struct { s *Service project string zone string instance string + tags *Tags urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } -// SimulateMaintenanceEvent: Simulates a host maintenance event on a VM. -// For more information, see Simulate a host maintenance event. +// SetTags: Sets network tags for the specified instance to the data +// included in the request. // // - instance: Name of the instance scoping this request. // - project: Project ID for this request. // - zone: The name of the zone for this request. 
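Because setTags replaces the whole tag set and the API fences concurrent writers with a fingerprint, the usual pattern is read-modify-write. A sketch under the same assumed setup:

    inst, err := svc.Instances.Get("my-project", "us-central1-a", "my-vm").Context(ctx).Do()
    if err != nil {
        log.Fatal(err)
    }
    // inst.Tags is assumed non-nil here; guard it in real code.
    op, err := svc.Instances.SetTags("my-project", "us-central1-a", "my-vm",
        &compute.Tags{
            Items:       append(inst.Tags.Items, "allow-https"),
            Fingerprint: inst.Tags.Fingerprint, // concurrency token from the Get
        }).Context(ctx).Do()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("tags update: %s", op.Status)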
-func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { - c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall { + c := &InstancesSetTagsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.zone = zone c.instance = instance + c.tags = tags + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesSetTagsCall) RequestId(requestId string) *InstancesSetTagsCall { + c.urlParams_.Set("requestId", requestId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { +func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -102710,21 +105487,21 @@ func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *In // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { +func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
-func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { +func (c *InstancesSetTagsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { +func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -102732,9 +105509,14 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { @@ -102749,14 +105531,196 @@ func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Res return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.instances.simulateMaintenanceEvent" call. +// Do executes the "compute.instances.setTags" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets network tags for the specified instance to the data included in the request.", + // "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + // "httpMethod": "POST", + // "id": "compute.instances.setTags", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/instances/{instance}/setTags", + // "request": { + // "$ref": "Tags" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.instances.simulateMaintenanceEvent": + +type InstancesSimulateMaintenanceEventCall struct { + s *Service + project string + zone string + instance string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SimulateMaintenanceEvent: Simulates a host maintenance event on a VM. +// For more information, see Simulate a host maintenance event. +// +// - instance: Name of the instance scoping this request. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
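The point of this hunk is that simulateMaintenanceEvent gains the requestId query parameter. A sketch of using it as an idempotency token, with the same assumed setup; the UUID literal is a placeholder and should be generated per logical request:

    // Retrying with the same requestId is safe: a completed first
    // attempt makes the retry a no-op on the server side.
    op, err := svc.Instances.SimulateMaintenanceEvent("my-project", "us-central1-a", "my-vm").
        RequestId("2f1c6c54-8a3b-4f0e-9d6a-1e2b3c4d5e6f"). // placeholder UUID
        Context(ctx).Do()
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("simulated maintenance: %s", op.Status)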
+func (r *InstancesService) SimulateMaintenanceEvent(project string, zone string, instance string) *InstancesSimulateMaintenanceEventCall { + c := &InstancesSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *InstancesSimulateMaintenanceEventCall) RequestId(requestId string) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *InstancesSimulateMaintenanceEventCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InstancesSimulateMaintenanceEventCall) Context(ctx context.Context) *InstancesSimulateMaintenanceEventCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *InstancesSimulateMaintenanceEventCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InstancesSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.simulateMaintenanceEvent" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -102811,6 +105775,11 @@ func (c *InstancesSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) // "required": true, // "type": "string" // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, // "zone": { // "description": "The name of the zone for this request.", // "location": "path", @@ -106761,6 +109730,449 @@ func (c *InterconnectLocationsListCall) Pages(ctx context.Context, f func(*Inter } } +// method id "compute.interconnectRemoteLocations.get": + +type InterconnectRemoteLocationsGetCall struct { + s *Service + project string + interconnectRemoteLocation string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the details for the specified interconnect remote +// location. Gets a list of available interconnect remote locations by +// making a list() request. +// +// - interconnectRemoteLocation: Name of the interconnect remote +// location to return. +// - project: Project ID for this request. +func (r *InterconnectRemoteLocationsService) Get(project string, interconnectRemoteLocation string) *InterconnectRemoteLocationsGetCall { + c := &InterconnectRemoteLocationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.interconnectRemoteLocation = interconnectRemoteLocation + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InterconnectRemoteLocationsGetCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *InterconnectRemoteLocationsGetCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *InterconnectRemoteLocationsGetCall) Context(ctx context.Context) *InterconnectRemoteLocationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request.
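A sketch of the conditional-GET flow the IfNoneMatch doc comment describes, reusing svc and ctx from the earlier sketch and additionally importing google.golang.org/api/googleapi; the location name is a placeholder:

    // First fetch: keep the ETag the server returned.
    loc, err := svc.InterconnectRemoteLocations.Get("my-project", "example-remote-location").Context(ctx).Do()
    if err != nil {
        log.Fatal(err)
    }
    etag := loc.ServerResponse.Header.Get("ETag")

    // Revalidation: a 304 comes back as an error that IsNotModified recognizes.
    _, err = svc.InterconnectRemoteLocations.Get("my-project", "example-remote-location").
        IfNoneMatch(etag).Context(ctx).Do()
    switch {
    case googleapi.IsNotModified(err):
        log.Println("cached copy still current")
    case err != nil:
        log.Fatal(err)
    }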
+func (c *InterconnectRemoteLocationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *InterconnectRemoteLocationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "interconnectRemoteLocation": c.interconnectRemoteLocation, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.interconnectRemoteLocations.get" call. +// Exactly one of *InterconnectRemoteLocation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *InterconnectRemoteLocation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *InterconnectRemoteLocationsGetCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &InterconnectRemoteLocation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the details for the specified interconnect remote location. 
Gets a list of available interconnect remote locations by making a list() request.", + // "flatPath": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", + // "httpMethod": "GET", + // "id": "compute.interconnectRemoteLocations.get", + // "parameterOrder": [ + // "project", + // "interconnectRemoteLocation" + // ], + // "parameters": { + // "interconnectRemoteLocation": { + // "description": "Name of the interconnect remote location to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/global/interconnectRemoteLocations/{interconnectRemoteLocation}", + // "response": { + // "$ref": "InterconnectRemoteLocation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.interconnectRemoteLocations.list": + +type InterconnectRemoteLocationsListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves the list of interconnect remote locations available +// to the specified project. +// +// - project: Project ID for this request. +func (r *InterconnectRemoteLocationsService) List(project string) *InterconnectRemoteLocationsListCall { + c := &InterconnectRemoteLocationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. 
For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *InterconnectRemoteLocationsListCall) Filter(filter string) *InterconnectRemoteLocationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *InterconnectRemoteLocationsListCall) MaxResults(maxResults int64) *InterconnectRemoteLocationsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *InterconnectRemoteLocationsListCall) OrderBy(orderBy string) *InterconnectRemoteLocationsListCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *InterconnectRemoteLocationsListCall) PageToken(pageToken string) *InterconnectRemoteLocationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *InterconnectRemoteLocationsListCall) ReturnPartialSuccess(returnPartialSuccess bool) *InterconnectRemoteLocationsListCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *InterconnectRemoteLocationsListCall) Fields(s ...googleapi.Field) *InterconnectRemoteLocationsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *InterconnectRemoteLocationsListCall) IfNoneMatch(entityTag string) *InterconnectRemoteLocationsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *InterconnectRemoteLocationsListCall) Context(ctx context.Context) *InterconnectRemoteLocationsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *InterconnectRemoteLocationsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *InterconnectRemoteLocationsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/global/interconnectRemoteLocations")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.interconnectRemoteLocations.list" call.
+// Exactly one of *InterconnectRemoteLocationList or error will be
+// non-nil. Any non-2xx status code is an error. Response headers are in
+// either *InterconnectRemoteLocationList.ServerResponse.Header or (if a
+// response was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *InterconnectRemoteLocationsListCall) Do(opts ...googleapi.CallOption) (*InterconnectRemoteLocationList, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &InterconnectRemoteLocationList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves the list of interconnect remote locations available to the specified project.", + // "flatPath": "projects/{project}/global/interconnectRemoteLocations", + // "httpMethod": "GET", + // "id": "compute.interconnectRemoteLocations.list", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. 
If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/global/interconnectRemoteLocations", + // "response": { + // "$ref": "InterconnectRemoteLocationList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *InterconnectRemoteLocationsListCall) Pages(ctx context.Context, f func(*InterconnectRemoteLocationList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "compute.interconnects.delete": type InterconnectsDeleteCall struct { @@ -109807,8 +113219,7 @@ type MachineImagesGetCall struct { header_ http.Header } -// Get: Returns the specified machine image. Gets a list of available -// machine images by making a list() request. +// Get: Returns the specified machine image. // // - machineImage: The name of the machine image. // - project: Project ID for this request. @@ -109919,7 +113330,7 @@ func (c *MachineImagesGetCall) Do(opts ...googleapi.CallOption) (*MachineImage, } return ret, nil // { - // "description": "Returns the specified machine image. 
Gets a list of available machine images by making a list() request.", + // "description": "Returns the specified machine image.", // "flatPath": "projects/{project}/global/machineImages/{machineImage}", // "httpMethod": "GET", // "id": "compute.machineImages.get", @@ -111214,8 +114625,7 @@ type MachineTypesGetCall struct { header_ http.Header } -// Get: Returns the specified machine type. Gets a list of available -// machine types by making a list() request. +// Get: Returns the specified machine type. // // - machineType: Name of the machine type to return. // - project: Project ID for this request. @@ -111329,7 +114739,7 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er } return ret, nil // { - // "description": "Returns the specified machine type. Gets a list of available machine types by making a list() request.", + // "description": "Returns the specified machine type.", // "flatPath": "projects/{project}/zones/{zone}/machineTypes/{machineType}", // "httpMethod": "GET", // "id": "compute.machineTypes.get", @@ -115237,8 +118647,7 @@ type NetworkEndpointGroupsGetCall struct { header_ http.Header } -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. +// Get: Returns the specified network endpoint group. // // - networkEndpointGroup: The name of the network endpoint group. It // should comply with RFC1035. @@ -115354,7 +118763,7 @@ func (c *NetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (*Networ } return ret, nil // { - // "description": "Returns the specified network endpoint group. Gets a list of available network endpoint groups by making a list() request.", + // "description": "Returns the specified network endpoint group.", // "flatPath": "projects/{project}/zones/{zone}/networkEndpointGroups/{networkEndpointGroup}", // "httpMethod": "GET", // "id": "compute.networkEndpointGroups.get", @@ -119597,8 +123006,7 @@ type NetworksGetCall struct { header_ http.Header } -// Get: Returns the specified network. Gets a list of available networks -// by making a list() request. +// Get: Returns the specified network. // // - network: Name of the network to return. // - project: Project ID for this request. @@ -119709,7 +123117,7 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { } return ret, nil // { - // "description": "Returns the specified network. Gets a list of available networks by making a list() request.", + // "description": "Returns the specified network.", // "flatPath": "projects/{project}/global/networks/{network}", // "httpMethod": "GET", // "id": "compute.networks.get", @@ -123923,6 +127331,196 @@ func (c *NodeGroupsSetNodeTemplateCall) Do(opts ...googleapi.CallOption) (*Opera } +// method id "compute.nodeGroups.simulateMaintenanceEvent": + +type NodeGroupsSimulateMaintenanceEventCall struct { + s *Service + project string + zone string + nodeGroup string + nodegroupssimulatemaintenanceeventrequest *NodeGroupsSimulateMaintenanceEventRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SimulateMaintenanceEvent: Simulates maintenance event on specified +// nodes from the node group. +// +// - nodeGroup: Name of the NodeGroup resource whose nodes will go under +// maintenance simulation. +// - project: Project ID for this request. +// - zone: The name of the zone for this request. 
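+//
+// Editor's note (hypothetical usage sketch): simulating maintenance on a
+// node through this call. svc and ctx are assumed to exist, the Nodes
+// field on the request type is an assumption based on the method's
+// purpose, and the UUID is a client-generated example:
+//
+//	op, err := svc.NodeGroups.SimulateMaintenanceEvent(
+//		"my-project", "us-central1-a", "my-node-group",
+//		&NodeGroupsSimulateMaintenanceEventRequest{
+//			Nodes: []string{"my-node-group-abcd"}, // assumed field naming the target nodes
+//		}).
+//		RequestId("0c79f2d6-2c52-4de4-a3a8-3cbbcc936302"). // makes retries idempotent
+//		Context(ctx).
+//		Do()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = op // poll the returned Operation until it completes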
+func (r *NodeGroupsService) SimulateMaintenanceEvent(project string, zone string, nodeGroup string, nodegroupssimulatemaintenanceeventrequest *NodeGroupsSimulateMaintenanceEventRequest) *NodeGroupsSimulateMaintenanceEventCall { + c := &NodeGroupsSimulateMaintenanceEventCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.nodeGroup = nodeGroup + c.nodegroupssimulatemaintenanceeventrequest = nodegroupssimulatemaintenanceeventrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *NodeGroupsSimulateMaintenanceEventCall) RequestId(requestId string) *NodeGroupsSimulateMaintenanceEventCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NodeGroupsSimulateMaintenanceEventCall) Fields(s ...googleapi.Field) *NodeGroupsSimulateMaintenanceEventCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NodeGroupsSimulateMaintenanceEventCall) Context(ctx context.Context) *NodeGroupsSimulateMaintenanceEventCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NodeGroupsSimulateMaintenanceEventCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NodeGroupsSimulateMaintenanceEventCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.nodegroupssimulatemaintenanceeventrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "nodeGroup": c.nodeGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.nodeGroups.simulateMaintenanceEvent" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NodeGroupsSimulateMaintenanceEventCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Simulates maintenance event on specified nodes from the node group.", + // "flatPath": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", + // "httpMethod": "POST", + // "id": "compute.nodeGroups.simulateMaintenanceEvent", + // "parameterOrder": [ + // "project", + // "zone", + // "nodeGroup" + // ], + // "parameters": { + // "nodeGroup": { + // "description": "Name of the NodeGroup resource whose nodes will go under maintenance simulation.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/zones/{zone}/nodeGroups/{nodeGroup}/simulateMaintenanceEvent", + // "request": { + // "$ref": "NodeGroupsSimulateMaintenanceEventRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.nodeGroups.testIamPermissions": type NodeGroupsTestIamPermissionsCall struct { @@ -124580,8 +128178,7 @@ type NodeTemplatesGetCall struct { header_ http.Header } -// Get: Returns the specified node template. Gets a list of available -// node templates by making a list() request. +// Get: Returns the specified node template. // // - nodeTemplate: Name of the node template to return. // - project: Project ID for this request. @@ -124695,7 +128292,7 @@ func (c *NodeTemplatesGetCall) Do(opts ...googleapi.CallOption) (*NodeTemplate, } return ret, nil // { - // "description": "Returns the specified node template. Gets a list of available node templates by making a list() request.", + // "description": "Returns the specified node template.", // "flatPath": "projects/{project}/regions/{region}/nodeTemplates/{nodeTemplate}", // "httpMethod": "GET", // "id": "compute.nodeTemplates.get", @@ -126042,8 +129639,7 @@ type NodeTypesGetCall struct { header_ http.Header } -// Get: Returns the specified node type. Gets a list of available node -// types by making a list() request. +// Get: Returns the specified node type. // // - nodeType: Name of the node type to return. // - project: Project ID for this request. @@ -126157,7 +129753,7 @@ func (c *NodeTypesGetCall) Do(opts ...googleapi.CallOption) (*NodeType, error) { } return ret, nil // { - // "description": "Returns the specified node type. Gets a list of available node types by making a list() request.", + // "description": "Returns the specified node type.", // "flatPath": "projects/{project}/zones/{zone}/nodeTypes/{nodeType}", // "httpMethod": "GET", // "id": "compute.nodeTypes.get", @@ -135800,8 +139396,7 @@ type RegionCommitmentsGetCall struct { header_ http.Header } -// Get: Returns the specified commitment resource. Gets a list of -// available commitments by making a list() request. +// Get: Returns the specified commitment resource. // // - commitment: Name of the commitment to return. // - project: Project ID for this request. @@ -135915,7 +139510,7 @@ func (c *RegionCommitmentsGetCall) Do(opts ...googleapi.CallOption) (*Commitment } return ret, nil // { - // "description": "Returns the specified commitment resource. Gets a list of available commitments by making a list() request.", + // "description": "Returns the specified commitment resource.", // "flatPath": "projects/{project}/regions/{region}/commitments/{commitment}", // "httpMethod": "GET", // "id": "compute.regionCommitments.get", @@ -136658,8 +140253,7 @@ type RegionDiskTypesGetCall struct { header_ http.Header } -// Get: Returns the specified regional disk type. Gets a list of -// available disk types by making a list() request. +// Get: Returns the specified regional disk type. 
// // - diskType: Name of the disk type to return. // - project: Project ID for this request. @@ -136773,7 +140367,7 @@ func (c *RegionDiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, er } return ret, nil // { - // "description": "Returns the specified regional disk type. Gets a list of available disk types by making a list() request.", + // "description": "Returns the specified regional disk type.", // "flatPath": "projects/{project}/regions/{region}/diskTypes/{diskType}", // "httpMethod": "GET", // "id": "compute.regionDiskTypes.get", @@ -139412,29 +143006,39 @@ func (c *RegionDisksTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*T } -// method id "compute.regionHealthCheckServices.delete": +// method id "compute.regionDisks.update": -type RegionHealthCheckServicesDeleteCall struct { - s *Service - project string - region string - healthCheckService string - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header +type RegionDisksUpdateCall struct { + s *Service + project string + region string + disk string + disk2 *Disk + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header } -// Delete: Deletes the specified regional HealthCheckService. +// Update: Update the specified disk with the data included in the +// request. Update is performed only on selected fields included as part +// of update-mask. Only the following fields can be modified: +// user_license. // -// - healthCheckService: Name of the HealthCheckService to delete. The -// name must be 1-63 characters long, and comply with RFC1035. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionHealthCheckServicesService) Delete(project string, region string, healthCheckService string) *RegionHealthCheckServicesDeleteCall { - c := &RegionHealthCheckServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} +// - disk: The disk name for this request. +// - project: Project ID for this request. +// - region: The name of the region for this request. +func (r *RegionDisksService) Update(project string, region string, disk string, disk2 *Disk) *RegionDisksUpdateCall { + c := &RegionDisksUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region - c.healthCheckService = healthCheckService + c.disk = disk + c.disk2 = disk2 + return c +} + +// Paths sets the optional parameter "paths": +func (c *RegionDisksUpdateCall) Paths(paths ...string) *RegionDisksUpdateCall { + c.urlParams_.SetMulti("paths", append([]string{}, paths...)) return c } @@ -139449,15 +143053,22 @@ func (r *RegionHealthCheckServicesService) Delete(project string, region string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionHealthCheckServicesDeleteCall) RequestId(requestId string) *RegionHealthCheckServicesDeleteCall { +func (c *RegionDisksUpdateCall) RequestId(requestId string) *RegionDisksUpdateCall { c.urlParams_.Set("requestId", requestId) return c } +// UpdateMask sets the optional parameter "updateMask": update_mask +// indicates fields to be updated as part of this request. +func (c *RegionDisksUpdateCall) UpdateMask(updateMask string) *RegionDisksUpdateCall { + c.urlParams_.Set("updateMask", updateMask) + return c +} + // Fields allows partial responses to be retrieved. 
See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthCheckServicesDeleteCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesDeleteCall { +func (c *RegionDisksUpdateCall) Fields(s ...googleapi.Field) *RegionDisksUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -139465,21 +143076,21 @@ func (c *RegionHealthCheckServicesDeleteCall) Fields(s ...googleapi.Field) *Regi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. -func (c *RegionHealthCheckServicesDeleteCall) Context(ctx context.Context) *RegionHealthCheckServicesDeleteCall { +func (c *RegionDisksUpdateCall) Context(ctx context.Context) *RegionDisksUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthCheckServicesDeleteCall) Header() http.Header { +func (c *RegionDisksUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthCheckServicesDeleteCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionDisksUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -139487,31 +143098,36 @@ func (c *RegionHealthCheckServicesDeleteCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/disks/{disk}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("DELETE", urls, body) + req, err := http.NewRequest("PATCH", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheckService": c.healthCheckService, + "project": c.project, + "region": c.region, + "disk": c.disk, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthCheckServices.delete" call. +// Do executes the "compute.regionDisks.update" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionDisksUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) 
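+	// Editor's note (hypothetical caller-side sketch for this Update
+	// call): per the method description above, only user-license data is
+	// mutable, so the request carries just that field and names it in the
+	// update mask. The "userLicenses" mask path and the use of the Disk
+	// Licenses field are assumptions, not confirmed by this diff:
+	//
+	//	op, err := svc.RegionDisks.Update("my-project", "us-central1", "my-disk",
+	//		&Disk{Licenses: []string{"projects/my-project/global/licenses/my-license"}}).
+	//		UpdateMask("userLicenses").
+	//		Do()
+	//	_, _ = op, err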
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -139542,22 +143158,28 @@ func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) ( } return ret, nil // { - // "description": "Deletes the specified regional HealthCheckService.", - // "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - // "httpMethod": "DELETE", - // "id": "compute.regionHealthCheckServices.delete", + // "description": "Update the specified disk with the data included in the request. Update is performed only on selected fields included as part of update-mask. Only the following fields can be modified: user_license.", + // "flatPath": "projects/{project}/regions/{region}/disks/{disk}", + // "httpMethod": "PATCH", + // "id": "compute.regionDisks.update", // "parameterOrder": [ // "project", // "region", - // "healthCheckService" + // "disk" // ], // "parameters": { - // "healthCheckService": { - // "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", + // "disk": { + // "description": "The disk name for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", // "required": true, // "type": "string" // }, + // "paths": { + // "location": "query", + // "repeated": true, + // "type": "string" + // }, // "project": { // "description": "Project ID for this request.", // "location": "path", @@ -139566,7 +143188,7 @@ func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) ( // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the region for this request.", // "location": "path", // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, @@ -139576,9 +143198,18 @@ func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) ( // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", // "location": "query", // "type": "string" + // }, + // "updateMask": { + // "description": "update_mask indicates fields to be updated as part of this request.", + // "format": "google-fieldmask", + // "location": "query", + // "type": "string" // } // }, - // "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "path": "projects/{project}/regions/{region}/disks/{disk}", + // "request": { + // "$ref": "Disk" + // }, // "response": { // "$ref": "Operation" // }, @@ -139590,203 +143221,32 @@ func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) ( } -// method id "compute.regionHealthCheckServices.get": +// method id "compute.regionHealthCheckServices.delete": -type RegionHealthCheckServicesGetCall struct { +type RegionHealthCheckServicesDeleteCall struct { s *Service project string region string healthCheckService string urlParams_ gensupport.URLParams - ifNoneMatch_ string ctx_ context.Context header_ http.Header } -// Get: Returns the specified regional HealthCheckService resource. +// Delete: Deletes the specified regional HealthCheckService. // -// - healthCheckService: Name of the HealthCheckService to update. The +// - healthCheckService: Name of the HealthCheckService to delete. The // name must be 1-63 characters long, and comply with RFC1035. // - project: Project ID for this request. // - region: Name of the region scoping this request. -func (r *RegionHealthCheckServicesService) Get(project string, region string, healthCheckService string) *RegionHealthCheckServicesGetCall { - c := &RegionHealthCheckServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} +func (r *RegionHealthCheckServicesService) Delete(project string, region string, healthCheckService string) *RegionHealthCheckServicesDeleteCall { + c := &RegionHealthCheckServicesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project c.region = region c.healthCheckService = healthCheckService return c } -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionHealthCheckServicesGetCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesGetCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// IfNoneMatch sets the optional parameter which makes the operation -// fail if the object's ETag matches the given value. This is useful for -// getting updates only after the object has changed since the last -// request. Use googleapi.IsNotModified to check whether the response -// error from Do is the result of In-None-Match. -func (c *RegionHealthCheckServicesGetCall) IfNoneMatch(entityTag string) *RegionHealthCheckServicesGetCall { - c.ifNoneMatch_ = entityTag - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionHealthCheckServicesGetCall) Context(ctx context.Context) *RegionHealthCheckServicesGetCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionHealthCheckServicesGetCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionHealthCheckServicesGetCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - reqHeaders.Set("If-None-Match", c.ifNoneMatch_) - } - var body io.Reader = nil - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("GET", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "healthCheckService": c.healthCheckService, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionHealthCheckServices.get" call. -// Exactly one of *HealthCheckService or error will be non-nil. Any -// non-2xx status code is an error. Response headers are in either -// *HealthCheckService.ServerResponse.Header or (if a response was -// returned at all) in error.(*googleapi.Error).Header. Use -// googleapi.IsNotModified to check whether the returned error was -// because http.StatusNotModified was returned. -func (c *RegionHealthCheckServicesGetCall) Do(opts ...googleapi.CallOption) (*HealthCheckService, error) { - gensupport.SetOptions(c.urlParams_, opts...) - res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &HealthCheckService{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Returns the specified regional HealthCheckService resource.", - // "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - // "httpMethod": "GET", - // "id": "compute.regionHealthCheckServices.get", - // "parameterOrder": [ - // "project", - // "region", - // "healthCheckService" - // ], - // "parameters": { - // "healthCheckService": { - // "description": "Name of the HealthCheckService to update. 
The name must be 1-63 characters long, and comply with RFC1035.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", - // "required": true, - // "type": "string" - // } - // }, - // "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", - // "response": { - // "$ref": "HealthCheckService" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute", - // "https://www.googleapis.com/auth/compute.readonly" - // ] - // } - -} - -// method id "compute.regionHealthCheckServices.insert": - -type RegionHealthCheckServicesInsertCall struct { - s *Service - project string - region string - healthcheckservice *HealthCheckService - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// Insert: Creates a regional HealthCheckService resource in the -// specified project and region using the data included in the request. -// -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionHealthCheckServicesService) Insert(project string, region string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesInsertCall { - c := &RegionHealthCheckServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.healthcheckservice = healthcheckservice - return c -} - // RequestId sets the optional parameter "requestId": An optional // request ID to identify requests. Specify a unique request ID so that // if you must retry your request, the server will know to ignore the @@ -139798,7 +143258,7 @@ func (r *RegionHealthCheckServicesService) Insert(project string, region string, // clients from accidentally creating duplicate commitments. The request // ID must be a valid UUID with the exception that zero UUID is not // supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionHealthCheckServicesInsertCall) RequestId(requestId string) *RegionHealthCheckServicesInsertCall { +func (c *RegionHealthCheckServicesDeleteCall) RequestId(requestId string) *RegionHealthCheckServicesDeleteCall { c.urlParams_.Set("requestId", requestId) return c } @@ -139806,7 +143266,7 @@ func (c *RegionHealthCheckServicesInsertCall) RequestId(requestId string) *Regio // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. -func (c *RegionHealthCheckServicesInsertCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesInsertCall { +func (c *RegionHealthCheckServicesDeleteCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } @@ -139814,21 +143274,21 @@ func (c *RegionHealthCheckServicesInsertCall) Fields(s ...googleapi.Field) *Regi // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
-func (c *RegionHealthCheckServicesInsertCall) Context(ctx context.Context) *RegionHealthCheckServicesInsertCall { +func (c *RegionHealthCheckServicesDeleteCall) Context(ctx context.Context) *RegionHealthCheckServicesDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. -func (c *RegionHealthCheckServicesInsertCall) Header() http.Header { +func (c *RegionHealthCheckServicesDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionHealthCheckServicesInsertCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionHealthCheckServicesDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { @@ -139836,35 +143296,384 @@ func (c *RegionHealthCheckServicesInsertCall) doRequest(alt string) (*http.Respo } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheckservice) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, + "project": c.project, + "region": c.region, + "healthCheckService": c.healthCheckService, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionHealthCheckServices.insert" call. +// Do executes the "compute.regionHealthCheckServices.delete" call. // Exactly one of *Operation or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Operation.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. -func (c *RegionHealthCheckServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { +func (c *RegionHealthCheckServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified regional HealthCheckService.", + // "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "httpMethod": "DELETE", + // "id": "compute.regionHealthCheckServices.delete", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheckService" + // ], + // "parameters": { + // "healthCheckService": { + // "description": "Name of the HealthCheckService to delete. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionHealthCheckServices.get": + +type RegionHealthCheckServicesGetCall struct { + s *Service + project string + region string + healthCheckService string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified regional HealthCheckService resource. +// +// - healthCheckService: Name of the HealthCheckService to update. The +// name must be 1-63 characters long, and comply with RFC1035. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
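+//
+// Editor's note (hypothetical usage sketch): a conditional read using
+// IfNoneMatch with an ETag saved from an earlier response. svc and
+// savedETag are assumed to exist; the Header field comes from the
+// embedded googleapi.ServerResponse populated by Do:
+//
+//	hcs, err := svc.RegionHealthCheckServices.Get("my-project", "us-central1", "my-hcs").
+//		IfNoneMatch(savedETag).
+//		Do()
+//	switch {
+//	case googleapi.IsNotModified(err):
+//		// Resource unchanged since savedETag; reuse the cached copy.
+//	case err != nil:
+//		log.Fatal(err)
+//	default:
+//		savedETag = hcs.Header.Get("Etag") // refresh the cached tag
+//	}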
+func (r *RegionHealthCheckServicesService) Get(project string, region string, healthCheckService string) *RegionHealthCheckServicesGetCall {
+	c := &RegionHealthCheckServicesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.project = project
+	c.region = region
+	c.healthCheckService = healthCheckService
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RegionHealthCheckServicesGetCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RegionHealthCheckServicesGetCall) IfNoneMatch(entityTag string) *RegionHealthCheckServicesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RegionHealthCheckServicesGetCall) Context(ctx context.Context) *RegionHealthCheckServicesGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *RegionHealthCheckServicesGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *RegionHealthCheckServicesGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"project":            c.project,
+		"region":             c.region,
+		"healthCheckService": c.healthCheckService,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.regionHealthCheckServices.get" call.
+// Exactly one of *HealthCheckService or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *HealthCheckService.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *RegionHealthCheckServicesGetCall) Do(opts ...googleapi.CallOption) (*HealthCheckService, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &HealthCheckService{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified regional HealthCheckService resource.", + // "flatPath": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "httpMethod": "GET", + // "id": "compute.regionHealthCheckServices.get", + // "parameterOrder": [ + // "project", + // "region", + // "healthCheckService" + // ], + // "parameters": { + // "healthCheckService": { + // "description": "Name of the HealthCheckService to update. The name must be 1-63 characters long, and comply with RFC1035.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/healthCheckServices/{healthCheckService}", + // "response": { + // "$ref": "HealthCheckService" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionHealthCheckServices.insert": + +type RegionHealthCheckServicesInsertCall struct { + s *Service + project string + region string + healthcheckservice *HealthCheckService + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a regional HealthCheckService resource in the +// specified project and region using the data included in the request. +// +// - project: Project ID for this request. +// - region: Name of the region scoping this request. +func (r *RegionHealthCheckServicesService) Insert(project string, region string, healthcheckservice *HealthCheckService) *RegionHealthCheckServicesInsertCall { + c := &RegionHealthCheckServicesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.healthcheckservice = healthcheckservice + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. 
If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionHealthCheckServicesInsertCall) RequestId(requestId string) *RegionHealthCheckServicesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionHealthCheckServicesInsertCall) Fields(s ...googleapi.Field) *RegionHealthCheckServicesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionHealthCheckServicesInsertCall) Context(ctx context.Context) *RegionHealthCheckServicesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionHealthCheckServicesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionHealthCheckServicesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.healthcheckservice) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/healthCheckServices") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionHealthCheckServices.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionHealthCheckServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -140612,8 +144421,7 @@ type RegionHealthChecksGetCall struct { header_ http.Header } -// Get: Returns the specified HealthCheck resource. Gets a list of -// available health checks by making a list() request. +// Get: Returns the specified HealthCheck resource. // // - healthCheck: Name of the HealthCheck resource to return. // - project: Project ID for this request. 
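+// Editor's note on the RegionHealthCheckServices insert/delete methods
+// above (hypothetical sketch): the requestId parameter makes retries of
+// a create safe. svc is assumed to exist and the HealthCheckService
+// fields shown are illustrative:
+//
+//	reqID := "2f9c5a9e-6a0c-4f3e-9d2b-1d3f5a7b9c0e" // client-generated UUID
+//	op, err := svc.RegionHealthCheckServices.Insert("my-project", "us-central1",
+//		&HealthCheckService{Name: "my-hcs"}).
+//		RequestId(reqID).
+//		Do()
+//	// Resending with the same reqID after a timeout is safe: the server
+//	// detects the duplicate and does not create a second resource.
+//	_, _ = op, err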
@@ -140727,7 +144535,7 @@ func (c *RegionHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HealthChe } return ret, nil // { - // "description": "Returns the specified HealthCheck resource. Gets a list of available health checks by making a list() request.", + // "description": "Returns the specified HealthCheck resource.", // "flatPath": "projects/{project}/regions/{region}/healthChecks/{healthCheck}", // "httpMethod": "GET", // "id": "compute.regionHealthChecks.get", @@ -146139,7 +149947,1019 @@ func (r *RegionInstanceGroupsService) ListInstances(project string, region strin // must match the entire field. For example, to filter for instances // that do not end with name "instance", you would use `name ne // .*instance`. -func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { +func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than `maxResults`, Compute Engine returns +// a `nextPageToken` that can be used to get the next page of results in +// subsequent list requests. Acceptable values are `0` to `500`, +// inclusive. (Default: `500`) +func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// OrderBy sets the optional parameter "orderBy": Sorts list results by +// a certain order. By default, results are returned in alphanumerical +// order based on the resource name. You can also sort results in +// descending order based on the creation timestamp using +// `orderBy="creationTimestamp desc". This sorts results based on the +// `creationTimestamp` field in reverse chronological order (newest +// result first). Use this to sort resources like operations so that the +// newest operation is returned first. Currently, only sorting by `name` +// or `creationTimestamp desc` is supported. +func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("orderBy", orderBy) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set `pageToken` to the `nextPageToken` returned by a +// previous list request to get the next page of results. +func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ReturnPartialSuccess sets the optional parameter +// "returnPartialSuccess": Opt-in for partial success behavior which +// provides partial results in case of failure. The default value is +// false. +func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroups.listInstances" call. +// Exactly one of *RegionInstanceGroupsListInstances or error will be +// non-nil. Any non-2xx status code is an error. Response headers are in +// either *RegionInstanceGroupsListInstances.ServerResponse.Header or +// (if a response was returned at all) in +// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check +// whether the returned error was because http.StatusNotModified was +// returned. +func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &RegionInstanceGroupsListInstances{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. 
The orderBy query parameter is not supported.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.listInstances", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroup" + // ], + // "parameters": { + // "filter": { + // "description": "A filter expression that filters resources listed in the response. Most Compute resources support two types of filter expressions: expressions that support regular expressions and expressions that follow API improvement proposal AIP-160. If you want to use AIP-160, your expression must specify the field name, an operator, and the value that you want to use for filtering. The value must be a string, a number, or a boolean. The operator must be either `=`, `!=`, `\u003e`, `\u003c`, `\u003c=`, `\u003e=` or `:`. For example, if you are filtering Compute Engine instances, you can exclude instances named `example-instance` by specifying `name != example-instance`. The `:` operator can be used with string fields to match substrings. For non-string fields it is equivalent to the `=` operator. The `:*` comparison can be used to test whether a key has been defined. For example, to find all objects with `owner` label use: ``` labels.owner:* ``` You can also filter nested fields. For example, you could specify `scheduling.automaticRestart = false` to include instances only if they are not scheduled for automatic restarts. You can use filtering on nested fields to filter based on resource labels. To filter on multiple expressions, provide each separate expression within parentheses. For example: ``` (scheduling.automaticRestart = true) (cpuPlatform = \"Intel Skylake\") ``` By default, each expression is an `AND` expression. However, you can include `AND` and `OR` expressions explicitly. For example: ``` (cpuPlatform = \"Intel Skylake\") OR (cpuPlatform = \"Intel Broadwell\") AND (scheduling.automaticRestart = true) ``` If you want to use a regular expression, use the `eq` (equal) or `ne` (not equal) operator against a single un-parenthesized expression with or without quotes or against multiple parenthesized expressions. Examples: `fieldname eq unquoted literal` `fieldname eq 'single quoted literal'` `fieldname eq \"double quoted literal\"` `(fieldname1 eq literal) (fieldname2 ne \"literal\")` The literal value is interpreted as a regular expression using Google RE2 library syntax. The literal value must match the entire field. For example, to filter for instances that do not end with name \"instance\", you would use `name ne .*instance`.", + // "location": "query", + // "type": "string" + // }, + // "instanceGroup": { + // "description": "Name of the regional instance group for which we want to list the instances.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "orderBy": { + // "description": "Sorts list results by a certain order. By default, results are returned in alphanumerical order based on the resource name. 
You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", + // "location": "query", + // "type": "string" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set `pageToken` to the `nextPageToken` returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "returnPartialSuccess": { + // "description": "Opt-in for partial success behavior which provides partial results in case of failure. The default value is false.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", + // "request": { + // "$ref": "RegionInstanceGroupsListInstancesRequest" + // }, + // "response": { + // "$ref": "RegionInstanceGroupsListInstances" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.regionInstanceGroups.setNamedPorts": + +type RegionInstanceGroupsSetNamedPortsCall struct { + s *Service + project string + region string + instanceGroup string + regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetNamedPorts: Sets the named ports for the specified regional +// instance group. +// +// - instanceGroup: The name of the regional instance group where the +// named ports are updated. +// - project: Project ID for this request. +// - region: Name of the region scoping this request. 
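
The filter, paging, and partial-success knobs documented in the listInstances metadata above compose on the call builder, while the request body carries a server-side instance-state filter. A hedged sketch, assuming svc and ctx as before and an existing regional managed instance group; InstanceState is a field of the request type upstream, not shown in this hunk:

func printRunningInstances(ctx context.Context, svc *compute.Service) error {
	req := &compute.RegionInstanceGroupsListInstancesRequest{
		InstanceState: "RUNNING", // assumed upstream field: restrict results server-side
	}
	resp, err := svc.RegionInstanceGroups.
		ListInstances("my-project", "us-central1", "my-group", req).
		MaxResults(100).
		ReturnPartialSuccess(true). // tolerate partial failures instead of erroring out
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, inst := range resp.Items {
		fmt.Println(inst.Instance, inst.Status) // instance URL and current status
	}
	return nil
}
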
+func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { + c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceGroup = instanceGroup + c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceGroup": c.instanceGroup, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. 
+// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Sets the named ports for the specified regional instance group.", + // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "httpMethod": "POST", + // "id": "compute.regionInstanceGroups.setNamedPorts", + // "parameterOrder": [ + // "project", + // "region", + // "instanceGroup" + // ], + // "parameters": { + // "instanceGroup": { + // "description": "The name of the regional instance group where the named ports are updated.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region scoping this request.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. 
The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", + // "request": { + // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceTemplates.delete": + +type RegionInstanceTemplatesDeleteCall struct { + s *Service + project string + region string + instanceTemplate string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes the specified instance template. Deleting an instance +// template is permanent and cannot be undone. +// +// - instanceTemplate: The name of the instance template to delete. +// - project: Project ID for this request. +// - region: The name of the region for this request. +func (r *RegionInstanceTemplatesService) Delete(project string, region string, instanceTemplate string) *RegionInstanceTemplatesDeleteCall { + c := &RegionInstanceTemplatesDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceTemplate = instanceTemplate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceTemplatesDeleteCall) RequestId(requestId string) *RegionInstanceTemplatesDeleteCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceTemplatesDeleteCall) Context(ctx context.Context) *RegionInstanceTemplatesDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
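
Delete, like the other mutations here, returns an *Operation immediately rather than blocking until the template is gone. One hedged way to wait for completion, assuming svc and ctx as before and the upstream RegionOperations.Wait call, which is not part of this hunk:

func deleteTemplateAndWait(ctx context.Context, svc *compute.Service) error {
	op, err := svc.RegionInstanceTemplates.
		Delete("my-project", "us-central1", "my-template").
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Wait can return before completion on a server-side deadline, so poll until DONE.
	for op.Status != "DONE" {
		op, err = svc.RegionOperations.
			Wait("my-project", "us-central1", op.Name).
			Context(ctx).
			Do()
		if err != nil {
			return err
		}
	}
	return nil
}
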
+func (c *RegionInstanceTemplatesDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "instanceTemplate": c.instanceTemplate, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceTemplates.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified instance template. 
Deleting an instance template is permanent and cannot be undone.", + // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "httpMethod": "DELETE", + // "id": "compute.regionInstanceTemplates.delete", + // "parameterOrder": [ + // "project", + // "region", + // "instanceTemplate" + // ], + // "parameters": { + // "instanceTemplate": { + // "description": "The name of the instance template to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceTemplates.get": + +type RegionInstanceTemplatesGetCall struct { + s *Service + project string + region string + instanceTemplate string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the specified instance template. +// +// - instanceTemplate: The name of the instance template. +// - project: Project ID for this request. +// - region: The name of the region for this request. +func (r *RegionInstanceTemplatesService) Get(project string, region string, instanceTemplate string) *RegionInstanceTemplatesGetCall { + c := &RegionInstanceTemplatesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instanceTemplate = instanceTemplate + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceTemplatesGetCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RegionInstanceTemplatesGetCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *RegionInstanceTemplatesGetCall) Context(ctx context.Context) *RegionInstanceTemplatesGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *RegionInstanceTemplatesGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *RegionInstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"project":          c.project,
+		"region":           c.region,
+		"instanceTemplate": c.instanceTemplate,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "compute.regionInstanceTemplates.get" call.
+// Exactly one of *InstanceTemplate or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *InstanceTemplate.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *RegionInstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTemplate, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &InstanceTemplate{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified instance template.", + // "flatPath": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "httpMethod": "GET", + // "id": "compute.regionInstanceTemplates.get", + // "parameterOrder": [ + // "project", + // "region", + // "instanceTemplate" + // ], + // "parameters": { + // "instanceTemplate": { + // "description": "The name of the instance template.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceTemplates/{instanceTemplate}", + // "response": { + // "$ref": "InstanceTemplate" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.regionInstanceTemplates.insert": + +type RegionInstanceTemplatesInsertCall struct { + s *Service + project string + region string + instancetemplate *InstanceTemplate + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates an instance template in the specified project and +// region using the global instance template whose URL is included in +// the request. +// +// - project: Project ID for this request. +// - region: The name of the region for this request. +func (r *RegionInstanceTemplatesService) Insert(project string, region string, instancetemplate *InstanceTemplate) *RegionInstanceTemplatesInsertCall { + c := &RegionInstanceTemplatesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.instancetemplate = instancetemplate + return c +} + +// RequestId sets the optional parameter "requestId": An optional +// request ID to identify requests. Specify a unique request ID so that +// if you must retry your request, the server will know to ignore the +// request if it has already been completed. For example, consider a +// situation where you make an initial request and the request times +// out. If you make the request again with the same request ID, the +// server can check if original operation with the same request ID was +// received, and if so, will ignore the second request. 
This prevents +// clients from accidentally creating duplicate commitments. The request +// ID must be a valid UUID with the exception that zero UUID is not +// supported ( 00000000-0000-0000-0000-000000000000). +func (c *RegionInstanceTemplatesInsertCall) RequestId(requestId string) *RegionInstanceTemplatesInsertCall { + c.urlParams_.Set("requestId", requestId) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RegionInstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RegionInstanceTemplatesInsertCall) Context(ctx context.Context) *RegionInstanceTemplatesInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *RegionInstanceTemplatesInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *RegionInstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.regionInstanceTemplates.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RegionInstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates an instance template in the specified project and region using the global instance template whose URL is included in the request.", + // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", + // "httpMethod": "POST", + // "id": "compute.regionInstanceTemplates.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "The name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "requestId": { + // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{project}/regions/{region}/instanceTemplates", + // "request": { + // "$ref": "InstanceTemplate" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.regionInstanceTemplates.list": + +type RegionInstanceTemplatesListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of instance templates that are contained +// within the specified project and region. +// +// - project: Project ID for this request. +// - region: The name of the regions for this request. +func (r *RegionInstanceTemplatesService) List(project string, region string) *RegionInstanceTemplatesListCall { + c := &RegionInstanceTemplatesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": A filter expression that +// filters resources listed in the response. 
Most Compute resources +// support two types of filter expressions: expressions that support +// regular expressions and expressions that follow API improvement +// proposal AIP-160. If you want to use AIP-160, your expression must +// specify the field name, an operator, and the value that you want to +// use for filtering. The value must be a string, a number, or a +// boolean. The operator must be either `=`, `!=`, `>`, `<`, `<=`, `>=` +// or `:`. For example, if you are filtering Compute Engine instances, +// you can exclude instances named `example-instance` by specifying +// `name != example-instance`. The `:` operator can be used with string +// fields to match substrings. For non-string fields it is equivalent to +// the `=` operator. The `:*` comparison can be used to test whether a +// key has been defined. For example, to find all objects with `owner` +// label use: ``` labels.owner:* ``` You can also filter nested fields. +// For example, you could specify `scheduling.automaticRestart = false` +// to include instances only if they are not scheduled for automatic +// restarts. You can use filtering on nested fields to filter based on +// resource labels. To filter on multiple expressions, provide each +// separate expression within parentheses. For example: ``` +// (scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +// ``` By default, each expression is an `AND` expression. However, you +// can include `AND` and `OR` expressions explicitly. For example: ``` +// (cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +// AND (scheduling.automaticRestart = true) ``` If you want to use a +// regular expression, use the `eq` (equal) or `ne` (not equal) operator +// against a single un-parenthesized expression with or without quotes +// or against multiple parenthesized expressions. Examples: `fieldname +// eq unquoted literal` `fieldname eq 'single quoted literal'` +// `fieldname eq "double quoted literal" `(fieldname1 eq literal) +// (fieldname2 ne "literal")` The literal value is interpreted as a +// regular expression using Google RE2 library syntax. The literal value +// must match the entire field. For example, to filter for instances +// that do not end with name "instance", you would use `name ne +// .*instance`. +func (c *RegionInstanceTemplatesListCall) Filter(filter string) *RegionInstanceTemplatesListCall { c.urlParams_.Set("filter", filter) return c } @@ -146150,7 +150970,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Filter(filter string) *RegionIns // a `nextPageToken` that can be used to get the next page of results in // subsequent list requests. Acceptable values are `0` to `500`, // inclusive. (Default: `500`) -func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *RegionInstanceGroupsListInstancesCall { +func (c *RegionInstanceTemplatesListCall) MaxResults(maxResults int64) *RegionInstanceTemplatesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } @@ -146164,7 +150984,7 @@ func (c *RegionInstanceGroupsListInstancesCall) MaxResults(maxResults int64) *Re // result first). Use this to sort resources like operations so that the // newest operation is returned first. Currently, only sorting by `name` // or `creationTimestamp desc` is supported. 
-func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionInstanceGroupsListInstancesCall {
+func (c *RegionInstanceTemplatesListCall) OrderBy(orderBy string) *RegionInstanceTemplatesListCall {
 	c.urlParams_.Set("orderBy", orderBy)
 	return c
 }
@@ -146172,7 +150992,7 @@ func (c *RegionInstanceGroupsListInstancesCall) OrderBy(orderBy string) *RegionI
 // PageToken sets the optional parameter "pageToken": Specifies a page
 // token to use. Set `pageToken` to the `nextPageToken` returned by a
 // previous list request to get the next page of results.
-func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *RegionInstanceGroupsListInstancesCall {
+func (c *RegionInstanceTemplatesListCall) PageToken(pageToken string) *RegionInstanceTemplatesListCall {
 	c.urlParams_.Set("pageToken", pageToken)
 	return c
 }
@@ -146181,7 +151001,7 @@ func (c *RegionInstanceGroupsListInstancesCall) PageToken(pageToken string) *Reg
 // "returnPartialSuccess": Opt-in for partial success behavior which
 // provides partial results in case of failure. The default value is
 // false.
-func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceGroupsListInstancesCall {
+func (c *RegionInstanceTemplatesListCall) ReturnPartialSuccess(returnPartialSuccess bool) *RegionInstanceTemplatesListCall {
 	c.urlParams_.Set("returnPartialSuccess", fmt.Sprint(returnPartialSuccess))
 	return c
 }
@@ -146189,67 +151009,73 @@ func (c *RegionInstanceGroupsListInstancesCall) ReturnPartialSuccess(returnParti
 // Fields allows partial responses to be retrieved. See
 // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
 // for more information.
-func (c *RegionInstanceGroupsListInstancesCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsListInstancesCall {
+func (c *RegionInstanceTemplatesListCall) Fields(s ...googleapi.Field) *RegionInstanceTemplatesListCall {
 	c.urlParams_.Set("fields", googleapi.CombineFields(s))
 	return c
 }
 
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *RegionInstanceTemplatesListCall) IfNoneMatch(entityTag string) *RegionInstanceTemplatesListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
 // Context sets the context to be used in this call's Do method. Any
 // pending HTTP request will be aborted if the provided context is
 // canceled.
-func (c *RegionInstanceGroupsListInstancesCall) Context(ctx context.Context) *RegionInstanceGroupsListInstancesCall {
+func (c *RegionInstanceTemplatesListCall) Context(ctx context.Context) *RegionInstanceTemplatesListCall {
 	c.ctx_ = ctx
 	return c
 }
 
 // Header returns an http.Header that can be modified by the caller to
 // add HTTP headers to the request.
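
The IfNoneMatch hook added above enables cheap revalidation: replay the ETag from an earlier response and treat a 304 as "still current". A sketch under the assumption that the server supplies an Etag header for this list (svc and ctx as before, plus google.golang.org/api/googleapi):

func refreshTemplates(ctx context.Context, svc *compute.Service, etag string) (string, error) {
	list, err := svc.RegionInstanceTemplates.
		List("my-project", "us-central1").
		IfNoneMatch(etag). // sends If-None-Match: <etag>
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return etag, nil // 304: the cached copy is still current
	}
	if err != nil {
		return "", err
	}
	fmt.Println(len(list.Items), "templates fetched")
	return list.ServerResponse.Header.Get("Etag"), nil // may be empty if the server omits it
}
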
-func (c *RegionInstanceGroupsListInstancesCall) Header() http.Header { +func (c *RegionInstanceTemplatesListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } -func (c *RegionInstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { +func (c *RegionInstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupslistinstancesrequest) - if err != nil { - return nil, err + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } - reqHeaders.Set("Content-Type", "application/json") + var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceTemplates") urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) + req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, + "project": c.project, + "region": c.region, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } -// Do executes the "compute.regionInstanceGroups.listInstances" call. -// Exactly one of *RegionInstanceGroupsListInstances or error will be -// non-nil. Any non-2xx status code is an error. Response headers are in -// either *RegionInstanceGroupsListInstances.ServerResponse.Header or -// (if a response was returned at all) in -// error.(*googleapi.Error).Header. Use googleapi.IsNotModified to check -// whether the returned error was because http.StatusNotModified was -// returned. -func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*RegionInstanceGroupsListInstances, error) { +// Do executes the "compute.regionInstanceTemplates.list" call. +// Exactly one of *InstanceTemplateList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *InstanceTemplateList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RegionInstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceTemplateList, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { @@ -146268,7 +151094,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) if err := googleapi.CheckResponse(res); err != nil { return nil, gensupport.WrapError(err) } - ret := &RegionInstanceGroupsListInstances{ + ret := &InstanceTemplateList{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, @@ -146280,14 +151106,13 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) } return ret, nil // { - // "description": "Lists the instances in the specified instance group and displays information about the named ports. Depending on the specified options, this method can list all instances or only the instances that are running. The orderBy query parameter is not supported.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.listInstances", + // "description": "Retrieves a list of instance templates that are contained within the specified project and region.", + // "flatPath": "projects/{project}/regions/{region}/instanceTemplates", + // "httpMethod": "GET", + // "id": "compute.regionInstanceTemplates.list", // "parameterOrder": [ // "project", - // "region", - // "instanceGroup" + // "region" // ], // "parameters": { // "filter": { @@ -146295,12 +151120,6 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "location": "query", // "type": "string" // }, - // "instanceGroup": { - // "description": "Name of the regional instance group for which we want to list the instances.", - // "location": "path", - // "required": true, - // "type": "string" - // }, // "maxResults": { // "default": "500", // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than `maxResults`, Compute Engine returns a `nextPageToken` that can be used to get the next page of results in subsequent list requests. Acceptable values are `0` to `500`, inclusive. (Default: `500`)", @@ -146327,8 +151146,9 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "type": "string" // }, // "region": { - // "description": "Name of the region scoping this request.", + // "description": "The name of the regions for this request.", // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", // "required": true, // "type": "string" // }, @@ -146338,12 +151158,9 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // "type": "boolean" // } // }, - // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/listInstances", - // "request": { - // "$ref": "RegionInstanceGroupsListInstancesRequest" - // }, + // "path": "projects/{project}/regions/{region}/instanceTemplates", // "response": { - // "$ref": "RegionInstanceGroupsListInstances" + // "$ref": "InstanceTemplateList" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", @@ -146357,7 +151174,7 @@ func (c *RegionInstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
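
Rather than threading nextPageToken by hand, the generated Pages helper shown just below drives the token loop and stops at the first non-nil error from the callback. A minimal usage sketch, with svc and ctx assumed as before:

func printAllTemplates(ctx context.Context, svc *compute.Service) error {
	return svc.RegionInstanceTemplates.
		List("my-project", "us-central1").
		MaxResults(50). // page size; Pages keeps fetching until NextPageToken is empty
		Pages(ctx, func(page *compute.InstanceTemplateList) error {
			for _, tmpl := range page.Items {
				fmt.Println(tmpl.Name)
			}
			return nil // returning a non-nil error here halts the iteration
		})
}
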
-func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f func(*RegionInstanceGroupsListInstances) error) error { +func (c *RegionInstanceTemplatesListCall) Pages(ctx context.Context, f func(*InstanceTemplateList) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { @@ -146375,194 +151192,6 @@ func (c *RegionInstanceGroupsListInstancesCall) Pages(ctx context.Context, f fun } } -// method id "compute.regionInstanceGroups.setNamedPorts": - -type RegionInstanceGroupsSetNamedPortsCall struct { - s *Service - project string - region string - instanceGroup string - regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest - urlParams_ gensupport.URLParams - ctx_ context.Context - header_ http.Header -} - -// SetNamedPorts: Sets the named ports for the specified regional -// instance group. -// -// - instanceGroup: The name of the regional instance group where the -// named ports are updated. -// - project: Project ID for this request. -// - region: Name of the region scoping this request. -func (r *RegionInstanceGroupsService) SetNamedPorts(project string, region string, instanceGroup string, regioninstancegroupssetnamedportsrequest *RegionInstanceGroupsSetNamedPortsRequest) *RegionInstanceGroupsSetNamedPortsCall { - c := &RegionInstanceGroupsSetNamedPortsCall{s: r.s, urlParams_: make(gensupport.URLParams)} - c.project = project - c.region = region - c.instanceGroup = instanceGroup - c.regioninstancegroupssetnamedportsrequest = regioninstancegroupssetnamedportsrequest - return c -} - -// RequestId sets the optional parameter "requestId": An optional -// request ID to identify requests. Specify a unique request ID so that -// if you must retry your request, the server will know to ignore the -// request if it has already been completed. For example, consider a -// situation where you make an initial request and the request times -// out. If you make the request again with the same request ID, the -// server can check if original operation with the same request ID was -// received, and if so, will ignore the second request. This prevents -// clients from accidentally creating duplicate commitments. The request -// ID must be a valid UUID with the exception that zero UUID is not -// supported ( 00000000-0000-0000-0000-000000000000). -func (c *RegionInstanceGroupsSetNamedPortsCall) RequestId(requestId string) *RegionInstanceGroupsSetNamedPortsCall { - c.urlParams_.Set("requestId", requestId) - return c -} - -// Fields allows partial responses to be retrieved. See -// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse -// for more information. -func (c *RegionInstanceGroupsSetNamedPortsCall) Fields(s ...googleapi.Field) *RegionInstanceGroupsSetNamedPortsCall { - c.urlParams_.Set("fields", googleapi.CombineFields(s)) - return c -} - -// Context sets the context to be used in this call's Do method. Any -// pending HTTP request will be aborted if the provided context is -// canceled. -func (c *RegionInstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *RegionInstanceGroupsSetNamedPortsCall { - c.ctx_ = ctx - return c -} - -// Header returns an http.Header that can be modified by the caller to -// add HTTP headers to the request. 
-func (c *RegionInstanceGroupsSetNamedPortsCall) Header() http.Header { - if c.header_ == nil { - c.header_ = make(http.Header) - } - return c.header_ -} - -func (c *RegionInstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { - reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) - for k, v := range c.header_ { - reqHeaders[k] = v - } - reqHeaders.Set("User-Agent", c.s.userAgent()) - var body io.Reader = nil - body, err := googleapi.WithoutDataWrapper.JSONReader(c.regioninstancegroupssetnamedportsrequest) - if err != nil { - return nil, err - } - reqHeaders.Set("Content-Type", "application/json") - c.urlParams_.Set("alt", alt) - c.urlParams_.Set("prettyPrint", "false") - urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts") - urls += "?" + c.urlParams_.Encode() - req, err := http.NewRequest("POST", urls, body) - if err != nil { - return nil, err - } - req.Header = reqHeaders - googleapi.Expand(req.URL, map[string]string{ - "project": c.project, - "region": c.region, - "instanceGroup": c.instanceGroup, - }) - return gensupport.SendRequest(c.ctx_, c.s.client, req) -} - -// Do executes the "compute.regionInstanceGroups.setNamedPorts" call. -// Exactly one of *Operation or error will be non-nil. Any non-2xx -// status code is an error. Response headers are in either -// *Operation.ServerResponse.Header or (if a response was returned at -// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified -// to check whether the returned error was because -// http.StatusNotModified was returned. -func (c *RegionInstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Operation, error) { - gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := c.doRequest("json") - if res != nil && res.StatusCode == http.StatusNotModified { - if res.Body != nil { - res.Body.Close() - } - return nil, gensupport.WrapError(&googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - }) - } - if err != nil { - return nil, err - } - defer googleapi.CloseBody(res) - if err := googleapi.CheckResponse(res); err != nil { - return nil, gensupport.WrapError(err) - } - ret := &Operation{ - ServerResponse: googleapi.ServerResponse{ - Header: res.Header, - HTTPStatusCode: res.StatusCode, - }, - } - target := &ret - if err := gensupport.DecodeResponse(target, res); err != nil { - return nil, err - } - return ret, nil - // { - // "description": "Sets the named ports for the specified regional instance group.", - // "flatPath": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - // "httpMethod": "POST", - // "id": "compute.regionInstanceGroups.setNamedPorts", - // "parameterOrder": [ - // "project", - // "region", - // "instanceGroup" - // ], - // "parameters": { - // "instanceGroup": { - // "description": "The name of the regional instance group where the named ports are updated.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "project": { - // "description": "Project ID for this request.", - // "location": "path", - // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", - // "required": true, - // "type": "string" - // }, - // "region": { - // "description": "Name of the region scoping this request.", - // "location": "path", - // "required": true, - // "type": "string" - // }, - // "requestId": { - // "description": "An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported ( 00000000-0000-0000-0000-000000000000).", - // "location": "query", - // "type": "string" - // } - // }, - // "path": "projects/{project}/regions/{region}/instanceGroups/{instanceGroup}/setNamedPorts", - // "request": { - // "$ref": "RegionInstanceGroupsSetNamedPortsRequest" - // }, - // "response": { - // "$ref": "Operation" - // }, - // "scopes": [ - // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/compute" - // ] - // } - -} - // method id "compute.regionInstances.bulkInsert": type RegionInstancesBulkInsertCall struct { @@ -146933,8 +151562,7 @@ type RegionNetworkEndpointGroupsGetCall struct { header_ http.Header } -// Get: Returns the specified network endpoint group. Gets a list of -// available network endpoint groups by making a list() request. +// Get: Returns the specified network endpoint group. // // - networkEndpointGroup: The name of the network endpoint group. It // should comply with RFC1035. @@ -147050,7 +151678,7 @@ func (c *RegionNetworkEndpointGroupsGetCall) Do(opts ...googleapi.CallOption) (* } return ret, nil // { - // "description": "Returns the specified network endpoint group. 
Gets a list of available network endpoint groups by making a list() request.", + // "description": "Returns the specified network endpoint group.", // "flatPath": "projects/{project}/regions/{region}/networkEndpointGroups/{networkEndpointGroup}", // "httpMethod": "GET", // "id": "compute.regionNetworkEndpointGroups.get", @@ -155736,8 +160364,7 @@ type RegionTargetHttpProxiesGetCall struct { } // Get: Returns the specified TargetHttpProxy resource in the specified -// region. Gets a list of available target HTTP proxies by making a -// list() request. +// region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. @@ -155851,7 +160478,7 @@ func (c *RegionTargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*Targ } return ret, nil // { - // "description": "Returns the specified TargetHttpProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + // "description": "Returns the specified TargetHttpProxy resource in the specified region.", // "flatPath": "projects/{project}/regions/{region}/targetHttpProxies/{targetHttpProxy}", // "httpMethod": "GET", // "id": "compute.regionTargetHttpProxies.get", @@ -156745,8 +161372,7 @@ type RegionTargetHttpsProxiesGetCall struct { } // Get: Returns the specified TargetHttpsProxy resource in the specified -// region. Gets a list of available target HTTP proxies by making a -// list() request. +// region. // // - project: Project ID for this request. // - region: Name of the region scoping this request. @@ -156860,7 +161486,7 @@ func (c *RegionTargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*Tar } return ret, nil // { - // "description": "Returns the specified TargetHttpsProxy resource in the specified region. Gets a list of available target HTTP proxies by making a list() request.", + // "description": "Returns the specified TargetHttpsProxy resource in the specified region.", // "flatPath": "projects/{project}/regions/{region}/targetHttpsProxies/{targetHttpsProxy}", // "httpMethod": "GET", // "id": "compute.regionTargetHttpsProxies.get", @@ -158943,8 +163569,7 @@ type RegionUrlMapsGetCall struct { header_ http.Header } -// Get: Returns the specified UrlMap resource. Gets a list of available -// URL maps by making a list() request. +// Get: Returns the specified UrlMap resource. // // - project: Project ID for this request. // - region: Name of the region scoping this request. @@ -159058,7 +163683,7 @@ func (c *RegionUrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) } return ret, nil // { - // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + // "description": "Returns the specified UrlMap resource.", // "flatPath": "projects/{project}/regions/{region}/urlMaps/{urlMap}", // "httpMethod": "GET", // "id": "compute.regionUrlMaps.get", @@ -160105,10 +164730,9 @@ type RegionsGetCall struct { header_ http.Header } -// Get: Returns the specified Region resource. Gets a list of available -// regions by making a list() request. To decrease latency for this -// method, you can optionally omit any unneeded information from the -// response by using a field mask. This practice is especially +// Get: Returns the specified Region resource. To decrease latency for +// this method, you can optionally omit any unneeded information from +// the response by using a field mask. 
This practice is especially // recommended for unused quota information (the `quotas` field). To // exclude one or more fields, set your request's `fields` query // parameter to only include the fields you need. For example, to only @@ -160224,7 +164848,7 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { } return ret, nil // { - // "description": "Returns the specified Region resource. Gets a list of available regions by making a list() request. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", + // "description": "Returns the specified Region resource. To decrease latency for this method, you can optionally omit any unneeded information from the response by using a field mask. This practice is especially recommended for unused quota information (the `quotas` field). To exclude one or more fields, set your request's `fields` query parameter to only include the fields you need. For example, to only include the `id` and `selfLink` fields, add the query parameter `?fields=id,selfLink` to your request.", // "flatPath": "projects/{project}/regions/{region}", // "httpMethod": "GET", // "id": "compute.regions.get", @@ -164715,8 +169339,7 @@ type RoutersGetCall struct { header_ http.Header } -// Get: Returns the specified Router resource. Gets a list of available -// routers by making a list() request. +// Get: Returns the specified Router resource. // // - project: Project ID for this request. // - region: Name of the region for this request. @@ -164830,7 +169453,7 @@ func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { } return ret, nil // { - // "description": "Returns the specified Router resource. Gets a list of available routers by making a list() request.", + // "description": "Returns the specified Router resource.", // "flatPath": "projects/{project}/regions/{region}/routers/{router}", // "httpMethod": "GET", // "id": "compute.routers.get", @@ -164954,6 +169577,15 @@ func (c *RoutersGetNatMappingInfoCall) MaxResults(maxResults int64) *RoutersGetN return c } +// NatName sets the optional parameter "natName": Name of the nat +// service to filter the Nat Mapping information. If it is omitted, all +// nats for this router will be returned. Name should conform to +// RFC1035. +func (c *RoutersGetNatMappingInfoCall) NatName(natName string) *RoutersGetNatMappingInfoCall { + c.urlParams_.Set("natName", natName) + return c +} + // OrderBy sets the optional parameter "orderBy": Sorts list results by // a certain order. By default, results are returned in alphanumerical // order based on the resource name. You can also sort results in @@ -165109,6 +169741,11 @@ func (c *RoutersGetNatMappingInfoCall) Do(opts ...googleapi.CallOption) (*VmEndp // "minimum": "0", // "type": "integer" // }, + // "natName": { + // "description": "Name of the nat service to filter the Nat Mapping information. If it is omitted, all nats for this router will be returned. Name should conform to RFC1035.", + // "location": "query", + // "type": "string" + // }, // "orderBy": { // "description": "Sorts list results by a certain order. 
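The Regions.Get latency advice above (omit unneeded fields, especially `quotas`, with a field mask) maps to the generated Fields option. A sketch reusing the svc client from the earlier example; IDs are placeholders:

region, err := svc.Regions.Get("my-project", "us-central1").
	Fields("id", "selfLink"). // sends ?fields=id,selfLink
	Do()
if err != nil {
	log.Fatal(err)
}
fmt.Println(region.SelfLink)
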
By default, results are returned in alphanumerical order based on the resource name. You can also sort results in descending order based on the creation timestamp using `orderBy=\"creationTimestamp desc\"`. This sorts results based on the `creationTimestamp` field in reverse chronological order (newest result first). Use this to sort resources like operations so that the newest operation is returned first. Currently, only sorting by `name` or `creationTimestamp desc` is supported.", // "location": "query", @@ -166552,8 +171189,7 @@ type RoutesGetCall struct { header_ http.Header } -// Get: Returns the specified Route resource. Gets a list of available -// routes by making a list() request. +// Get: Returns the specified Route resource. // // - project: Project ID for this request. // - route: Name of the Route resource to return. @@ -166664,7 +171300,7 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { } return ret, nil // { - // "description": "Returns the specified Route resource. Gets a list of available routes by making a list() request.", + // "description": "Returns the specified Route resource.", // "flatPath": "projects/{project}/global/routes/{route}", // "httpMethod": "GET", // "id": "compute.routes.get", @@ -171521,8 +176157,7 @@ type SnapshotsGetCall struct { header_ http.Header } -// Get: Returns the specified Snapshot resource. Gets a list of -// available snapshots by making a list() request. +// Get: Returns the specified Snapshot resource. // // - project: Project ID for this request. // - snapshot: Name of the Snapshot resource to return. @@ -171633,7 +176268,7 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { } return ret, nil // { - // "description": "Returns the specified Snapshot resource. Gets a list of available snapshots by making a list() request.", + // "description": "Returns the specified Snapshot resource.", // "flatPath": "projects/{project}/global/snapshots/{snapshot}", // "httpMethod": "GET", // "id": "compute.snapshots.get", @@ -173237,8 +177872,7 @@ type SslCertificatesGetCall struct { header_ http.Header } -// Get: Returns the specified SslCertificate resource. Gets a list of -// available SSL certificates by making a list() request. +// Get: Returns the specified SslCertificate resource. // // - project: Project ID for this request. // - sslCertificate: Name of the SslCertificate resource to return. @@ -173349,7 +177983,7 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica } return ret, nil // { - // "description": "Returns the specified SslCertificate resource. Gets a list of available SSL certificates by making a list() request.", + // "description": "Returns the specified SslCertificate resource.", // "flatPath": "projects/{project}/global/sslCertificates/{sslCertificate}", // "httpMethod": "GET", // "id": "compute.sslCertificates.get", @@ -174469,8 +179103,7 @@ type SslPoliciesInsertCall struct { header_ http.Header } -// Insert: Returns the specified SSL policy resource. Gets a list of -// available SSL policies by making a list() request. +// Insert: Returns the specified SSL policy resource. // // - project: Project ID for this request. func (r *SslPoliciesService) Insert(project string, sslpolicy *SslPolicy) *SslPoliciesInsertCall { @@ -174587,7 +179220,7 @@ func (c *SslPoliciesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er } return ret, nil // { - // "description": "Returns the specified SSL policy resource. 
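The NatName parameter added above narrows GetNatMappingInfo to a single NAT gateway; left unset, mappings for every NAT on the router are returned. A hedged sketch, again reusing svc; the router and NAT names are placeholders, and the Result/InstanceName fields are assumed from the compute/v1 schema rather than shown in this diff:

mappings, err := svc.Routers.GetNatMappingInfo("my-project", "us-central1", "my-router").
	NatName("my-nat"). // must conform to RFC1035
	Do()
if err != nil {
	log.Fatal(err)
}
for _, m := range mappings.Result {
	fmt.Println(m.InstanceName)
}
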
Gets a list of available SSL policies by making a list() request.", + // "description": "Returns the specified SSL policy resource.", // "flatPath": "projects/{project}/global/sslPolicies", // "httpMethod": "POST", // "id": "compute.sslPolicies.insert", @@ -176018,8 +180651,7 @@ type SubnetworksGetCall struct { header_ http.Header } -// Get: Returns the specified subnetwork. Gets a list of available -// subnetworks list() request. +// Get: Returns the specified subnetwork. // // - project: Project ID for this request. // - region: Name of the region scoping this request. @@ -176133,7 +180765,7 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro } return ret, nil // { - // "description": "Returns the specified subnetwork. Gets a list of available subnetworks list() request.", + // "description": "Returns the specified subnetwork.", // "flatPath": "projects/{project}/regions/{region}/subnetworks/{subnetwork}", // "httpMethod": "GET", // "id": "compute.subnetworks.get", @@ -179277,8 +183909,7 @@ type TargetHttpProxiesGetCall struct { header_ http.Header } -// Get: Returns the specified TargetHttpProxy resource. Gets a list of -// available target HTTP proxies by making a list() request. +// Get: Returns the specified TargetHttpProxy resource. // // - project: Project ID for this request. // - targetHttpProxy: Name of the TargetHttpProxy resource to return. @@ -179389,7 +184020,7 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp } return ret, nil // { - // "description": "Returns the specified TargetHttpProxy resource. Gets a list of available target HTTP proxies by making a list() request.", + // "description": "Returns the specified TargetHttpProxy resource.", // "flatPath": "projects/{project}/global/targetHttpProxies/{targetHttpProxy}", // "httpMethod": "GET", // "id": "compute.targetHttpProxies.get", @@ -180701,8 +185332,7 @@ type TargetHttpsProxiesGetCall struct { header_ http.Header } -// Get: Returns the specified TargetHttpsProxy resource. Gets a list of -// available target HTTPS proxies by making a list() request. +// Get: Returns the specified TargetHttpsProxy resource. // // - project: Project ID for this request. // - targetHttpsProxy: Name of the TargetHttpsProxy resource to return. @@ -180813,7 +185443,7 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt } return ret, nil // { - // "description": "Returns the specified TargetHttpsProxy resource. Gets a list of available target HTTPS proxies by making a list() request.", + // "description": "Returns the specified TargetHttpsProxy resource.", // "flatPath": "projects/{project}/global/targetHttpsProxies/{targetHttpsProxy}", // "httpMethod": "GET", // "id": "compute.targetHttpsProxies.get", @@ -182849,8 +187479,7 @@ type TargetInstancesGetCall struct { header_ http.Header } -// Get: Returns the specified TargetInstance resource. Gets a list of -// available target instances by making a list() request. +// Get: Returns the specified TargetInstance resource. // // - project: Project ID for this request. // - targetInstance: Name of the TargetInstance resource to return. @@ -182964,7 +187593,7 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan } return ret, nil // { - // "description": "Returns the specified TargetInstance resource. 
Gets a list of available target instances by making a list() request.", + // "description": "Returns the specified TargetInstance resource.", // "flatPath": "projects/{project}/zones/{zone}/targetInstances/{targetInstance}", // "httpMethod": "GET", // "id": "compute.targetInstances.get", @@ -184342,8 +188971,7 @@ type TargetPoolsGetCall struct { header_ http.Header } -// Get: Returns the specified target pool. Gets a list of available -// target pools by making a list() request. +// Get: Returns the specified target pool. // // - project: Project ID for this request. // - region: Name of the region scoping this request. @@ -184457,7 +189085,7 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro } return ret, nil // { - // "description": "Returns the specified target pool. Gets a list of available target pools by making a list() request.", + // "description": "Returns the specified target pool.", // "flatPath": "projects/{project}/regions/{region}/targetPools/{targetPool}", // "httpMethod": "GET", // "id": "compute.targetPools.get", @@ -185898,8 +190526,7 @@ type TargetSslProxiesGetCall struct { header_ http.Header } -// Get: Returns the specified TargetSslProxy resource. Gets a list of -// available target SSL proxies by making a list() request. +// Get: Returns the specified TargetSslProxy resource. // // - project: Project ID for this request. // - targetSslProxy: Name of the TargetSslProxy resource to return. @@ -186010,7 +190637,7 @@ func (c *TargetSslProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetSslPr } return ret, nil // { - // "description": "Returns the specified TargetSslProxy resource. Gets a list of available target SSL proxies by making a list() request.", + // "description": "Returns the specified TargetSslProxy resource.", // "flatPath": "projects/{project}/global/targetSslProxies/{targetSslProxy}", // "httpMethod": "GET", // "id": "compute.targetSslProxies.get", @@ -187856,8 +192483,7 @@ type TargetTcpProxiesGetCall struct { header_ http.Header } -// Get: Returns the specified TargetTcpProxy resource. Gets a list of -// available target TCP proxies by making a list() request. +// Get: Returns the specified TargetTcpProxy resource. // // - project: Project ID for this request. // - targetTcpProxy: Name of the TargetTcpProxy resource to return. @@ -187968,7 +192594,7 @@ func (c *TargetTcpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetTcpPr } return ret, nil // { - // "description": "Returns the specified TargetTcpProxy resource. Gets a list of available target TCP proxies by making a list() request.", + // "description": "Returns the specified TargetTcpProxy resource.", // "flatPath": "projects/{project}/global/targetTcpProxies/{targetTcpProxy}", // "httpMethod": "GET", // "id": "compute.targetTcpProxies.get", @@ -189292,8 +193918,7 @@ type TargetVpnGatewaysGetCall struct { header_ http.Header } -// Get: Returns the specified target VPN gateway. Gets a list of -// available target VPN gateways by making a list() request. +// Get: Returns the specified target VPN gateway. // // - project: Project ID for this request. // - region: Name of the region for this request. @@ -189407,7 +194032,7 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG } return ret, nil // { - // "description": "Returns the specified target VPN gateway. 
Gets a list of available target VPN gateways by making a list() request.", + // "description": "Returns the specified target VPN gateway.", // "flatPath": "projects/{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}", // "httpMethod": "GET", // "id": "compute.targetVpnGateways.get", @@ -190586,8 +195211,7 @@ type UrlMapsGetCall struct { header_ http.Header } -// Get: Returns the specified UrlMap resource. Gets a list of available -// URL maps by making a list() request. +// Get: Returns the specified UrlMap resource. // // - project: Project ID for this request. // - urlMap: Name of the UrlMap resource to return. @@ -190698,7 +195322,7 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { } return ret, nil // { - // "description": "Returns the specified UrlMap resource. Gets a list of available URL maps by making a list() request.", + // "description": "Returns the specified UrlMap resource.", // "flatPath": "projects/{project}/global/urlMaps/{urlMap}", // "httpMethod": "GET", // "id": "compute.urlMaps.get", @@ -192359,8 +196983,7 @@ type VpnGatewaysGetCall struct { header_ http.Header } -// Get: Returns the specified VPN gateway. Gets a list of available VPN -// gateways by making a list() request. +// Get: Returns the specified VPN gateway. // // - project: Project ID for this request. // - region: Name of the region for this request. @@ -192474,7 +197097,7 @@ func (c *VpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*VpnGateway, erro } return ret, nil // { - // "description": "Returns the specified VPN gateway. Gets a list of available VPN gateways by making a list() request.", + // "description": "Returns the specified VPN gateway.", // "flatPath": "projects/{project}/regions/{region}/vpnGateways/{vpnGateway}", // "httpMethod": "GET", // "id": "compute.vpnGateways.get", @@ -194006,8 +198629,7 @@ type VpnTunnelsGetCall struct { header_ http.Header } -// Get: Returns the specified VpnTunnel resource. Gets a list of -// available VPN tunnels by making a list() request. +// Get: Returns the specified VpnTunnel resource. // // - project: Project ID for this request. // - region: Name of the region for this request. @@ -194121,7 +198743,7 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) } return ret, nil // { - // "description": "Returns the specified VpnTunnel resource. Gets a list of available VPN tunnels by making a list() request.", + // "description": "Returns the specified VpnTunnel resource.", // "flatPath": "projects/{project}/regions/{region}/vpnTunnels/{vpnTunnel}", // "httpMethod": "GET", // "id": "compute.vpnTunnels.get", @@ -195597,8 +200219,7 @@ type ZonesGetCall struct { header_ http.Header } -// Get: Returns the specified Zone resource. Gets a list of available -// zones by making a list() request. +// Get: Returns the specified Zone resource. // // - project: Project ID for this request. // - zone: Name of the zone resource to return. @@ -195709,7 +200330,7 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { } return ret, nil // { - // "description": "Returns the specified Zone resource. 
Gets a list of available zones by making a list() request.", + // "description": "Returns the specified Zone resource.", // "flatPath": "projects/{project}/zones/{zone}", // "httpMethod": "GET", // "id": "compute.zones.get", diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 65b125ab..b5e38c66 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -11,7 +11,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "net/url" "strings" @@ -144,7 +143,7 @@ func CheckResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } - slurp, err := ioutil.ReadAll(res.Body) + slurp, err := io.ReadAll(res.Body) if err == nil { jerr := new(errorReply) err = json.Unmarshal(slurp, jerr) @@ -184,10 +183,11 @@ func CheckMediaResponse(res *http.Response) error { if res.StatusCode >= 200 && res.StatusCode <= 299 { return nil } - slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) + slurp, _ := io.ReadAll(io.LimitReader(res.Body, 1<<20)) return &Error{ - Code: res.StatusCode, - Body: string(slurp), + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, } } diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go new file mode 100644 index 00000000..cecbb9ba --- /dev/null +++ b/vendor/google.golang.org/api/internal/cba.go @@ -0,0 +1,282 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// cba.go (certificate-based access) contains utils for implementing Device Certificate +// Authentication according to https://google.aip.dev/auth/4114 and Default Credentials +// for Google Cloud Virtual Environments according to https://google.aip.dev/auth/4115. +// +// The overall logic for DCA is as follows: +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. +// +// Implications of the above logic: +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. +// +// If running within Google's cloud environment, and client certificate is not specified +// and not available through DCA, we will try mTLS with credentials held by +// the Secure Session Agent, which is part of Google's cloud infrastructure. +// +// We would like to avoid introducing client-side logic that parses whether the +// endpoint override is an mTLS url, since the url pattern may change at anytime. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. + +// Package internal supports the options and transport packages. 
+package internal + +import ( + "context" + "crypto/tls" + "net" + "net/url" + "os" + "strings" + + "github.com/google/s2a-go" + "github.com/google/s2a-go/fallback" + "google.golang.org/api/internal/cert" + "google.golang.org/grpc/credentials" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" + + // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. + googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" +) + +// getClientCertificateSourceAndEndpoint is a convenience function that invokes +// getClientCertificateSource and getEndpoint sequentially and returns the client +// cert source and endpoint as a tuple. +func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, string, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } + return clientCertSource, endpoint, nil +} + +type transportConfig struct { + clientCertSource cert.Source // The client certificate source. + endpoint string // The corresponding endpoint to use based on client certificate source. + s2aAddress string // The S2A address if it can be used, otherwise an empty string. + s2aMTLSEndpoint string // The MTLS endpoint to use with S2A. +} + +func getTransportConfig(settings *DialSettings) (*transportConfig, error) { + clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings) + if err != nil { + return &transportConfig{ + clientCertSource: nil, endpoint: "", s2aAddress: "", s2aMTLSEndpoint: "", + }, err + } + defaultTransportConfig := transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: "", + s2aMTLSEndpoint: "", + } + + // Check the env to determine whether to use S2A. + if !isGoogleS2AEnabled() { + return &defaultTransportConfig, nil + } + + // If client cert is found, use that over S2A. + // If MTLS is not enabled for the endpoint, skip S2A. + if clientCertSource != nil || !mtlsEndpointEnabledForS2A() { + return &defaultTransportConfig, nil + } + s2aMTLSEndpoint := settings.DefaultMTLSEndpoint + // If there is endpoint override, honor it. + if settings.Endpoint != "" { + s2aMTLSEndpoint = endpoint + } + s2aAddress := GetS2AAddress() + if s2aAddress == "" { + return &defaultTransportConfig, nil + } + return &transportConfig{ + clientCertSource: clientCertSource, + endpoint: endpoint, + s2aAddress: s2aAddress, + s2aMTLSEndpoint: s2aMTLSEndpoint, + }, nil +} + +func isGoogleS2AEnabled() bool { + return strings.ToLower(os.Getenv(googleAPIUseS2AEnv)) == "true" +} + +// getClientCertificateSource returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +// +// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE +// must be set to "true" to allow certificate to be used (including user provided +// certificates). For details, see AIP-4114. 
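Per the note above, certificate-based access stays off unless explicitly enabled; the endpoint side is governed separately by GOOGLE_API_USE_MTLS_ENDPOINT (read by getMTLSMode below). A two-line opt-in sketch using the environment variables the code itself checks:

// Allow client certificates (user-provided or default) to be used at all.
os.Setenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "true")
// Endpoint mode: "auto" (the default) picks mTLS only when a cert is available.
os.Setenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") // or "always" / "never"
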
+func getClientCertificateSource(settings *DialSettings) (cert.Source, error) { + if !isClientCertificateEnabled() { + return nil, nil + } else if settings.ClientCertSource != nil { + return settings.ClientCertSource, nil + } else { + return cert.DefaultSource() + } +} + +func isClientCertificateEnabled() bool { + useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE") + // TODO(andyrzhao): Update default to return "true" after DCA feature is fully released. + return strings.ToLower(useClientCert) == "true" +} + +// getEndpoint returns the endpoint for the service, taking into account the +// user-provided endpoint override "settings.Endpoint". +// +// If no endpoint override is specified, we will either return the default endpoint or +// the default mTLS endpoint if a client certificate is available. +// +// You can override the default endpoint choice (mtls vs. regular) by setting the +// GOOGLE_API_USE_MTLS_ENDPOINT environment variable. +// +// If the endpoint override is an address (host:port) rather than full base +// URL (ex. https://...), then the user-provided address will be merged into +// the default endpoint. For example, WithEndpoint("myhost:8000") and +// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" +func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { + if settings.Endpoint == "" { + mtlsMode := getMTLSMode() + if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + return settings.DefaultMTLSEndpoint, nil + } + return settings.DefaultEndpoint, nil + } + if strings.Contains(settings.Endpoint, "://") { + // User passed in a full URL path, use it verbatim. + return settings.Endpoint, nil + } + if settings.DefaultEndpoint == "" { + // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. + // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. + return settings.Endpoint, nil + } + + // Assume user-provided endpoint is host[:port], merge it with the default endpoint. + return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) +} + +func getMTLSMode() string { + mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT") + if mode == "" { + mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated. + } + if mode == "" { + return mTLSModeAuto + } + return strings.ToLower(mode) +} + +func mergeEndpoints(baseURL, newHost string) (string, error) { + u, err := url.Parse(fixScheme(baseURL)) + if err != nil { + return "", err + } + return strings.Replace(baseURL, u.Host, newHost, 1), nil +} + +func fixScheme(baseURL string) string { + if !strings.Contains(baseURL, "://") { + return "https://" + baseURL + } + return baseURL +} + +// GetGRPCTransportConfigAndEndpoint returns an instance of credentials.TransportCredentials, and the +// corresponding endpoint to use for GRPC client. +func GetGRPCTransportConfigAndEndpoint(settings *DialSettings) (credentials.TransportCredentials, string, error) { + config, err := getTransportConfig(settings) + if err != nil { + return nil, "", err + } + + defaultTransportCreds := credentials.NewTLS(&tls.Config{ + GetClientCertificate: config.clientCertSource, + }) + if config.s2aAddress == "" { + return defaultTransportCreds, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. 
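getEndpoint and mergeEndpoints above splice a bare host[:port] override into the default endpoint: only the host portion of the default URL is replaced, so the scheme and path survive and the override's own port is kept (for the values in the comment the merged result is https://myhost:8000/bar/baz). A standalone illustration of that replacement:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	base := "https://foo.com/bar/baz" // default endpoint
	u, err := url.Parse(base)
	if err != nil {
		panic(err)
	}
	// Swap only the host, exactly as mergeEndpoints does.
	fmt.Println(strings.Replace(base, u.Host, "myhost:8000", 1))
	// Output: https://myhost:8000/bar/baz
}
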
+ if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackClientHandshakeFunc: fallbackHandshake, + } + } + + s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + if err != nil { + // Use default if we cannot initialize S2A client transport credentials. + return defaultTransportCreds, config.endpoint, nil + } + return s2aTransportCreds, config.s2aMTLSEndpoint, nil +} + +// GetHTTPTransportConfigAndEndpoint returns a client certificate source, a function for dialing MTLS with S2A, +// and the endpoint to use for HTTP client. +func GetHTTPTransportConfigAndEndpoint(settings *DialSettings) (cert.Source, func(context.Context, string, string) (net.Conn, error), string, error) { + config, err := getTransportConfig(settings) + if err != nil { + return nil, nil, "", err + } + + if config.s2aAddress == "" { + return config.clientCertSource, nil, config.endpoint, nil + } + + var fallbackOpts *s2a.FallbackOptions + // In case of S2A failure, fall back to the endpoint that would've been used without S2A. + if fallbackURL, err := url.Parse(config.endpoint); err == nil { + if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil { + fallbackOpts = &s2a.FallbackOptions{ + FallbackDialer: &s2a.FallbackDialer{ + Dialer: fallbackDialer, + ServerAddr: fallbackServerAddr, + }, + } + } + } + + dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{ + S2AAddress: config.s2aAddress, + FallbackOpts: fallbackOpts, + }) + return nil, dialTLSContextFunc, config.s2aMTLSEndpoint, nil +} + +// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. +var mtlsEndpointEnabledForS2A = func() bool { + // TODO(xmenxk): determine this via discovery config. + return true +} diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/internal/cert/default_cert.go similarity index 100% rename from vendor/google.golang.org/api/transport/cert/default_cert.go rename to vendor/google.golang.org/api/internal/cert/default_cert.go diff --git a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go b/vendor/google.golang.org/api/internal/cert/enterprise_cert.go similarity index 93% rename from vendor/google.golang.org/api/transport/cert/enterprise_cert.go rename to vendor/google.golang.org/api/internal/cert/enterprise_cert.go index eaa52e07..1061b5f0 100644 --- a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go +++ b/vendor/google.golang.org/api/internal/cert/enterprise_cert.go @@ -15,7 +15,6 @@ package cert import ( "crypto/tls" "errors" - "os" "github.com/googleapis/enterprise-certificate-proxy/client" ) @@ -36,8 +35,7 @@ type ecpSource struct { func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { key, err := client.Cred(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Enterprise Certificate Proxy is not supported. 
+ if errors.Is(err, client.ErrCredUnavailable) { return nil, errSourceUnavailable } return nil, err diff --git a/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go similarity index 98% rename from vendor/google.golang.org/api/transport/cert/secureconnect_cert.go rename to vendor/google.golang.org/api/internal/cert/secureconnect_cert.go index 5913cab8..afd79ffe 100644 --- a/vendor/google.golang.org/api/transport/cert/secureconnect_cert.go +++ b/vendor/google.golang.org/api/internal/cert/secureconnect_cert.go @@ -18,7 +18,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "os/exec" "os/user" @@ -59,7 +58,7 @@ func NewSecureConnectSource(configFilePath string) (Source, error) { configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile) } - file, err := ioutil.ReadFile(configFilePath) + file, err := os.ReadFile(configFilePath) if err != nil { if errors.Is(err, os.ErrNotExist) { // Config file missing means Secure Connect is not supported. diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 32d52413..92b3acf6 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -6,10 +6,14 @@ package internal import ( "context" + "crypto/tls" "encoding/json" "errors" "fmt" - "io/ioutil" + "net" + "net/http" + "os" + "time" "golang.org/x/oauth2" "google.golang.org/api/internal/impersonate" @@ -17,6 +21,8 @@ import ( "golang.org/x/oauth2/google" ) +const quotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" + // Creds returns credential information obtained from DialSettings, or if none, then // it returns default credential information. func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { @@ -41,7 +47,7 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) } if ds.CredentialsFile != "" { - data, err := ioutil.ReadFile(ds.CredentialsFile) + data, err := os.ReadFile(ds.CredentialsFile) if err != nil { return nil, fmt.Errorf("cannot read credentials file: %v", err) } @@ -80,8 +86,25 @@ const ( // - Otherwise, executes standard OAuth 2.0 flow // More details: google.aip.dev/auth/4111 func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { + var params google.CredentialsParams + params.Scopes = ds.GetScopes() + + // Determine configurations for the OAuth2 transport, which is separate from the API transport. + // The OAuth2 transport and endpoint will be configured for mTLS if applicable. + clientCertSource, oauth2Endpoint, err := getClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) + if err != nil { + return nil, err + } + params.TokenURL = oauth2Endpoint + if clientCertSource != nil { + tlsConfig := &tls.Config{ + GetClientCertificate: clientCertSource, + } + ctx = context.WithValue(ctx, oauth2.HTTPClient, customHTTPClient(tlsConfig)) + } + // By default, a standard OAuth 2.0 token source is created - cred, err := google.CredentialsFromJSON(ctx, data, ds.GetScopes()...) + cred, err := google.CredentialsFromJSONWithParams(ctx, data, params) if err != nil { return nil, err } @@ -131,14 +154,22 @@ func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource } } -// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. 
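credentialsFromJSON above injects its mTLS-capable client through the context: the oauth2 package looks up the oauth2.HTTPClient key when exchanging tokens, so the same hook works for any custom client. A minimal sketch; key.json is a hypothetical service-account file:

package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"time"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	// Route all token-endpoint traffic through a custom client.
	hc := &http.Client{Timeout: 10 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, hc)

	data, err := os.ReadFile("key.json") // hypothetical credentials file
	if err != nil {
		log.Fatal(err)
	}
	params := google.CredentialsParams{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
	}
	creds, err := google.CredentialsFromJSONWithParams(ctx, data, params)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(creds.ProjectID)
}
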
-// -// NOTE(cbro): consider promoting this to a field on google.Credentials. -func QuotaProjectFromCreds(cred *google.Credentials) string { +// GetQuotaProject retrieves quota project with precedence being: client option, +// environment variable, creds file. +func GetQuotaProject(creds *google.Credentials, clientOpt string) string { + if clientOpt != "" { + return clientOpt + } + if env := os.Getenv(quotaProjectEnvVar); env != "" { + return env + } + if creds == nil { + return "" + } var v struct { QuotaProject string `json:"quota_project_id"` } - if err := json.Unmarshal(cred.JSON, &v); err != nil { + if err := json.Unmarshal(creds.JSON, &v); err != nil { return "" } return v.QuotaProject @@ -157,3 +188,35 @@ func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds * ProjectID: creds.ProjectID, }, nil } + +// oauth2DialSettings returns the settings to be used by the OAuth2 transport, which is separate from the API transport. +func oauth2DialSettings(ds *DialSettings) *DialSettings { + var ods DialSettings + ods.DefaultEndpoint = google.Endpoint.TokenURL + ods.DefaultMTLSEndpoint = google.MTLSTokenURL + ods.ClientCertSource = ds.ClientCertSource + return &ods +} + +// customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS. +func customHTTPClient(tlsConfig *tls.Config) *http.Client { + trans := baseTransport() + trans.TLSClientConfig = tlsConfig + return &http.Client{Transport: trans} +} + +func baseTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/google.golang.org/api/internal/gensupport/json.go b/vendor/google.golang.org/api/internal/gensupport/json.go index 1b770464..eab49a11 100644 --- a/vendor/google.golang.org/api/internal/gensupport/json.go +++ b/vendor/google.golang.org/api/internal/gensupport/json.go @@ -86,7 +86,12 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { ms, ok := v.Interface().(map[string]string) if !ok { - return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name) + mi, err := initMapSlow(v, f.Name, useNullMaps) + if err != nil { + return nil, err + } + m[tag.apiName] = mi + continue } mi := map[string]interface{}{} for k, v := range ms { @@ -120,6 +125,25 @@ func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNu return m, nil } +// initMapSlow uses reflection to build up a map object. This is slower than +// the default behavior so it should be used only as a fallback. +func initMapSlow(rv reflect.Value, fieldName string, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { + mi := map[string]interface{}{} + iter := rv.MapRange() + for iter.Next() { + k, ok := iter.Key().Interface().(string) + if !ok { + return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]any", fieldName) + } + v := iter.Value().Interface() + mi[k] = v + } + for k := range useNullMaps[fieldName] { + mi[k] = nil + } + return mi, nil +} + // formatAsString returns a string representation of v, dereferencing it first if possible. 
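GetQuotaProject above fixes the precedence for the billing/quota project: an explicit client option, then the GOOGLE_CLOUD_QUOTA_PROJECT environment variable, then quota_project_id from the credentials JSON. A sketch of pinning it from the caller's side via the public option; the project ID is a placeholder:

package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	htransport "google.golang.org/api/transport/http"
)

func main() {
	ctx := context.Background()
	// The option wins over the env var, which wins over the credentials file.
	client, endpoint, err := htransport.NewClient(ctx,
		option.WithQuotaProject("billing-project"), // placeholder ID
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
	log.Println(endpoint)
}
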
func formatAsString(v reflect.Value, kind reflect.Kind) string { if kind == reflect.Ptr && !v.IsNil() { diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go index 8356e7f2..c048a570 100644 --- a/vendor/google.golang.org/api/internal/gensupport/media.go +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -8,7 +8,6 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "mime" "mime/multipart" "net/http" @@ -222,8 +221,8 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB toCleanup = append(toCleanup, combined) if fb != nil && fm != nil { getBody = func() (io.ReadCloser, error) { - rb := ioutil.NopCloser(fb()) - rm := ioutil.NopCloser(fm()) + rb := io.NopCloser(fb()) + rm := io.NopCloser(fm()) var mimeBoundary string if _, params, err := mime.ParseMediaType(ctype); err == nil { mimeBoundary = params["boundary"] @@ -243,7 +242,7 @@ func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newB fb := readerFunc(body) if fb != nil { getBody = func() (io.ReadCloser, error) { - rb := ioutil.NopCloser(fb()) + rb := io.NopCloser(fb()) toCleanup = append(toCleanup, rb) return rb, nil } diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go index 0c659188..08e7aace 100644 --- a/vendor/google.golang.org/api/internal/gensupport/resumable.go +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -43,8 +43,8 @@ type ResumableUpload struct { // retries should happen. ChunkRetryDeadline time.Duration - // Track current request invocation ID and attempt count for retry metric - // headers. + // Track current request invocation ID and attempt count for retry metrics + // and idempotency headers. invocationID string attempts int } @@ -81,10 +81,15 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) + // TODO(b/274504690): Consider dropping gccl-invocation-id key since it + // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). baseXGoogHeader := "gl-go/" + GoVersion() + " gdcl/" + internal.Version invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", rx.invocationID, rx.attempts) req.Header.Set("X-Goog-Api-Client", strings.Join([]string{baseXGoogHeader, invocationHeader}, " ")) + // Set idempotency token header which is used by GCS uploads. + req.Header.Set("X-Goog-Gcs-Idempotency-Token", rx.invocationID) + // Google's upload endpoint uses status code 308 for a // different purpose than the "308 Permanent Redirect" // since-standardized in RFC 7238. Because of the conflict in @@ -193,22 +198,27 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // Each chunk gets its own initialized-at-zero backoff and invocation ID. bo := rx.Retry.backoff() - quitAfter := time.After(retryDeadline) + quitAfterTimer := time.NewTimer(retryDeadline) rx.attempts = 1 rx.invocationID = uuid.New().String() // Retry loop for a single chunk. 
for { + pauseTimer := time.NewTimer(pause) select { case <-ctx.Done(): + quitAfterTimer.Stop() + pauseTimer.Stop() if err == nil { err = ctx.Err() } return prepareReturn(resp, err) - case <-time.After(pause): - case <-quitAfter: + case <-pauseTimer.C: + case <-quitAfterTimer.C: + pauseTimer.Stop() return prepareReturn(resp, err) } + pauseTimer.Stop() // Check for context cancellation or timeout once more. If more than one // case in the select statement above was satisfied at the same time, Go @@ -217,11 +227,12 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // canceled before or the timeout was reached. select { case <-ctx.Done(): + quitAfterTimer.Stop() if err == nil { err = ctx.Err() } return prepareReturn(resp, err) - case <-quitAfter: + case <-quitAfterTimer.C: return prepareReturn(resp, err) default: } @@ -235,6 +246,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err // Check if we should retry the request. if !errorFunc(status, err) { + quitAfterTimer.Stop() break } diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go index dd24139b..693a1b1a 100644 --- a/vendor/google.golang.org/api/internal/gensupport/send.go +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -115,15 +115,17 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r var errorFunc = retry.errorFunc() for { + t := time.NewTimer(pause) select { case <-ctx.Done(): + t.Stop() // If we got an error and the context has been canceled, return an error acknowledging // both the context cancelation and the service error. if err != nil { return resp, wrappedCallErr{ctx.Err(), err} } return resp, ctx.Err() - case <-time.After(pause): + case <-t.C: } if ctx.Err() != nil { @@ -136,9 +138,14 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r } return resp, ctx.Err() } + + // Set retry metrics and idempotency headers for GCS. + // TODO(b/274504690): Consider dropping gccl-invocation-id key since it + // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v0.115.0). 
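Both retry loops touched above (resumable.go and send.go) swap time.After for explicit timers. A timer created by time.After is not reclaimed until it fires, so a loop that exits early on context cancellation or the retry deadline strands one timer per pending pause; NewTimer plus Stop releases it immediately. The pattern in isolation:

// wait sleeps for pause but honors ctx cancellation without leaking
// the timer the way <-time.After(pause) would.
func wait(ctx context.Context, pause time.Duration) error {
	t := time.NewTimer(pause)
	select {
	case <-ctx.Done():
		t.Stop() // release timer resources now rather than at expiry
		return ctx.Err()
	case <-t.C:
		return nil
	}
}
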
invocationHeader := fmt.Sprintf("gccl-invocation-id/%s gccl-attempt-count/%d", invocationID, attempts) xGoogHeader := strings.Join([]string{invocationHeader, baseXGoogHeader}, " ") req.Header.Set("X-Goog-Api-Client", xGoogHeader) + req.Header.Set("X-Goog-Gcs-Idempotency-Token", invocationID) resp, err = client.Do(req.WithContext(ctx)) diff --git a/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/vendor/google.golang.org/api/internal/impersonate/impersonate.go index b465bbcd..4b2c775f 100644 --- a/vendor/google.golang.org/api/internal/impersonate/impersonate.go +++ b/vendor/google.golang.org/api/internal/impersonate/impersonate.go @@ -11,7 +11,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/http" "time" @@ -105,7 +104,7 @@ func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) } defer resp.Body.Close() - body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20)) if err != nil { return nil, fmt.Errorf("impersonate: unable to read body: %v", err) } diff --git a/vendor/google.golang.org/api/internal/s2a.go b/vendor/google.golang.org/api/internal/s2a.go new file mode 100644 index 00000000..c5b421f5 --- /dev/null +++ b/vendor/google.golang.org/api/internal/s2a.go @@ -0,0 +1,136 @@ +// Copyright 2023 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "encoding/json" + "log" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" +) + +const configEndpointSuffix = "googleAutoMtlsConfiguration" + +// The period an MTLS config can be reused before needing refresh. +var configExpiry = time.Hour + +// GetS2AAddress returns the S2A address to be reached via plaintext connection. +func GetS2AAddress() string { + c, err := getMetadataMTLSAutoConfig().Config() + if err != nil { + return "" + } + if !c.Valid() { + return "" + } + return c.S2A.PlaintextAddress +} + +type mtlsConfigSource interface { + Config() (*mtlsConfig, error) +} + +// mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source. +var ( + mdsMTLSAutoConfigSource mtlsConfigSource + once sync.Once +) + +// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh. +func getMetadataMTLSAutoConfig() mtlsConfigSource { + once.Do(func() { + mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{ + src: &metadataMTLSAutoConfig{}, + } + }) + return mdsMTLSAutoConfigSource +} + +// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry. +// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig. 
+type reuseMTLSConfigSource struct { + src mtlsConfigSource // src.Config() is called when config is expired + mu sync.Mutex // mutex guards config + config *mtlsConfig // cached config +} + +func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) { + cs.mu.Lock() + defer cs.mu.Unlock() + + if cs.config.Valid() { + return cs.config, nil + } + c, err := cs.src.Config() + if err != nil { + return nil, err + } + cs.config = c + return c, nil +} + +// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource +// It has the logic to query MDS and return an mtlsConfig +type metadataMTLSAutoConfig struct{} + +var httpGetMetadataMTLSConfig = func() (string, error) { + return metadata.Get(configEndpointSuffix) +} + +func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) { + resp, err := httpGetMetadataMTLSConfig() + if err != nil { + log.Printf("querying MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + var config mtlsConfig + err = json.Unmarshal([]byte(resp), &config) + if err != nil { + log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err) + return defaultMTLSConfig(), nil + } + + if config.S2A == nil { + log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config) + return defaultMTLSConfig(), nil + } + + // set new expiry + config.Expiry = time.Now().Add(configExpiry) + return &config, nil +} + +func defaultMTLSConfig() *mtlsConfig { + return &mtlsConfig{ + S2A: &s2aAddresses{ + PlaintextAddress: "", + MTLSAddress: "", + }, + Expiry: time.Now().Add(configExpiry), + } +} + +// s2aAddresses contains the plaintext and/or MTLS S2A addresses. +type s2aAddresses struct { + // PlaintextAddress is the plaintext address to reach S2A + PlaintextAddress string `json:"plaintext_address"` + // MTLSAddress is the MTLS address to reach S2A + MTLSAddress string `json:"mtls_address"` +} + +// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs. +type mtlsConfig struct { + S2A *s2aAddresses `json:"s2a"` + Expiry time.Time +} + +func (c *mtlsConfig) Valid() bool { + return c != nil && c.S2A != nil && !c.expired() +} +func (c *mtlsConfig) expired() bool { + return c.Expiry.Before(time.Now()) +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 76efdb22..3a3874df 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -46,6 +46,7 @@ type DialSettings struct { SkipValidation bool ImpersonationConfig *impersonate.Config EnableDirectPath bool + EnableDirectPathXds bool AllowNonDefaultServiceAccount bool // Google API system parameters. For more information please read: diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index e96a3316..46ad187e 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
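reuseMTLSConfigSource above is an instance of a small cache-with-expiry pattern: serve the cached value under a mutex while it is valid, and hit the slower source again once it expires. A generic sketch of the same idea (names are illustrative, not from the library; needs sync and time):

type expiringCache[T any] struct {
	mu     sync.Mutex
	val    T
	expiry time.Time
	fetch  func() (T, time.Time, error) // slow source; returns value and new expiry
}

func (c *expiringCache[T]) Get() (T, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Now().Before(c.expiry) {
		return c.val, nil // still fresh
	}
	v, exp, err := c.fetch()
	if err != nil {
		var zero T
		return zero, err
	}
	c.val, c.expiry = v, exp
	return v, nil
}
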
-const Version = "0.104.0" +const Version = "0.126.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 343a5a96..3b8461d1 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -67,6 +67,21 @@ func (e enableDirectPath) Apply(o *internal.DialSettings) { o.EnableDirectPath = bool(e) } +// EnableDirectPathXds returns a ClientOption that overrides the default +// DirectPath type. It is only valid when DirectPath is enabled. +// +// It should only be used internally by generated clients. +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func EnableDirectPathXds() option.ClientOption { + return enableDirectPathXds(true) +} + +type enableDirectPathXds bool + +func (x enableDirectPathXds) Apply(o *internal.DialSettings) { + o.EnableDirectPathXds = bool(x) +} + // AllowNonDefaultServiceAccount returns a ClientOption that overrides the default // requirement for using the default service account for DirectPath. // @@ -134,3 +149,10 @@ type withCreds google.Credentials func (w *withCreds) Apply(o *internal.DialSettings) { o.InternalCredentials = (*google.Credentials)(w) } + +// EmbeddableAdapter is a no-op option.ClientOption that allow libraries to +// create their own client options by embedding this type into their own +// client-specific option wrapper. See example for usage. +type EmbeddableAdapter struct{} + +func (*EmbeddableAdapter) Apply(_ *internal.DialSettings) {} diff --git a/vendor/google.golang.org/api/transport/http/configure_http2_go116.go b/vendor/google.golang.org/api/transport/http/configure_http2_go116.go deleted file mode 100644 index 305a6929..00000000 --- a/vendor/google.golang.org/api/transport/http/configure_http2_go116.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2021 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.16 -// +build go1.16 - -package http - -import ( - "net/http" - "time" - - "golang.org/x/net/http2" -) - -// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the -// transport. This allows broken idle connections to be pruned more quickly, -// preventing the client from attempting to re-use connections that will no -// longer work. -func configureHTTP2(trans *http.Transport) { - http2Trans, err := http2.ConfigureTransports(trans) - if err == nil { - http2Trans.ReadIdleTimeout = time.Second * 31 - } -} diff --git a/vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go b/vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go deleted file mode 100644 index d2742d28..00000000 --- a/vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.16 -// +build !go1.16 - -package http - -import ( - "net/http" -) - -// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the -// transport. The interface to do this is only available in Go 1.16 and up, so -// this performs a no-op. 
-func configureHTTP2(trans *http.Transport) {} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index cab709f0..eca0c3ba 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -16,13 +16,13 @@ import ( "time" "go.opencensus.io/plugin/ochttp" + "golang.org/x/net/http2" "golang.org/x/oauth2" "google.golang.org/api/googleapi/transport" "google.golang.org/api/internal" + "google.golang.org/api/internal/cert" "google.golang.org/api/option" - "google.golang.org/api/transport/cert" "google.golang.org/api/transport/http/internal/propagation" - "google.golang.org/api/transport/internal/dca" ) // NewClient returns an HTTP client for use communicating with a Google cloud @@ -33,7 +33,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if err != nil { return nil, "", err } - clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(settings) + clientCertSource, dialTLSContext, endpoint, err := internal.GetHTTPTransportConfigAndEndpoint(settings) if err != nil { return nil, "", err } @@ -41,7 +41,8 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, if settings.HTTPClient != nil { return settings.HTTPClient, endpoint, nil } - trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource), settings) + + trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource, dialTLSContext), settings) if err != nil { return nil, "", err } @@ -65,7 +66,6 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna paramTransport := ¶meterTransport{ base: base, userAgent: settings.UserAgent, - quotaProject: settings.QuotaProject, requestReason: settings.RequestReason, } var trans http.RoundTripper = paramTransport @@ -74,6 +74,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna case settings.NoAuth: // Do nothing. case settings.APIKey != "": + paramTransport.quotaProject = internal.GetQuotaProject(nil, settings.QuotaProject) trans = &transport.APIKey{ Transport: trans, Key: settings.APIKey, @@ -83,10 +84,7 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } - if paramTransport.quotaProject == "" { - paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds) - } - + paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) ts := creds.TokenSource if settings.ImpersonationConfig == nil && settings.TokenSource != nil { ts = settings.TokenSource @@ -155,7 +153,7 @@ var appengineUrlfetchHook func(context.Context) http.RoundTripper // Otherwise, use a default transport, taking most defaults from // http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. 
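Before the patched defaultBaseTransport below, a standalone sketch of the two TLS hooks it can now wire up: GetClientCertificate for an mTLS client-certificate source and DialTLSContext for a custom TLS dialer. The values are stubs, not the vendored logic:

```go
package main

import (
	"context"
	"crypto/tls"
	"net"
	"net/http"
)

func main() {
	trans := http.DefaultTransport.(*http.Transport).Clone()

	// Hook 1: a client-certificate source, as when clientCertSource != nil.
	trans.TLSClientConfig = &tls.Config{
		GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
			return &tls.Certificate{}, nil // stub; a real source loads an mTLS cert
		},
	}

	// Hook 2: a custom TLS dialer, as when dialTLSContext != nil. Per the
	// vendored comment, TLSClientConfig will be ignored for these dials.
	trans.DialTLSContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		d := tls.Dialer{Config: &tls.Config{MinVersion: tls.VersionTLS12}}
		return d.DialContext(ctx, network, addr)
	}

	_ = &http.Client{Transport: trans}
}
```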
-func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper {
+func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper {
 	if appengineUrlfetchHook != nil {
 		return appengineUrlfetchHook(ctx)
 	}
@@ -174,14 +172,27 @@ func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) htt
 			GetClientCertificate: clientCertSource,
 		}
 	}
+	if dialTLSContext != nil {
+		// If DialTLSContext is set, TLSClientConfig will be ignored
+		trans.DialTLSContext = dialTLSContext
+	}
 
-	// If possible, configure http2 transport in order to use ReadIdleTimeout
-	// setting. This can only be done in Go 1.16 and up.
 	configureHTTP2(trans)
 
 	return trans
 }
 
+// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the
+// transport. This allows broken idle connections to be pruned more quickly,
+// preventing the client from attempting to re-use connections that will no
+// longer work.
+func configureHTTP2(trans *http.Transport) {
+	http2Trans, err := http2.ConfigureTransports(trans)
+	if err == nil {
+		http2Trans.ReadIdleTimeout = time.Second * 31
+	}
+}
+
+// fallbackBaseTransport is used in
...
diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go
new file mode 100644
...
+// DialTimeout is like Dial but takes a timeout.
+// The timeout includes name resolution, if required.
+func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) {
+	dialCtx := ctx
+	if timeout > 0 {
+		var cancel context.CancelFunc
+		dialCtx, cancel = context.WithTimeout(ctx, timeout)
+		defer cancel()
+	}
+
+	host, portStr, err := net.SplitHostPort(addr)
+	if err != nil {
+		return nil, err
+	}
+	port, err := strconv.Atoi(portStr)
+	if err != nil {
+		return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err)
+	}
+
+	var prot pb.CreateSocketRequest_SocketProtocol
+	switch protocol {
+	case "tcp":
+		prot = pb.CreateSocketRequest_TCP
+	case "udp":
+		prot = pb.CreateSocketRequest_UDP
+	default:
+		return nil, fmt.Errorf("socket: unknown protocol %q", protocol)
+	}
+
+	packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host)
+	if err != nil {
+		return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
+	}
+	if len(packedAddrs) == 0 {
+		return nil, fmt.Errorf("no addresses for %q", host)
+	}
+
+	packedAddr := packedAddrs[0] // use first address
+	fam := pb.CreateSocketRequest_IPv4
+	if len(packedAddr) == net.IPv6len {
+		fam = pb.CreateSocketRequest_IPv6
+	}
+
+	req := &pb.CreateSocketRequest{
+		Family:   fam.Enum(),
+		Protocol: prot.Enum(),
+		RemoteIp: &pb.AddressPort{
+			Port:          proto.Int32(int32(port)),
+			PackedAddress: packedAddr,
+		},
+	}
+	if resolved {
+		req.RemoteIp.HostnameHint = &host
+	}
+	res := &pb.CreateSocketReply{}
+	if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil {
+		return nil, err
+	}
+
+	return &Conn{
+		ctx:    ctx,
+		desc:   res.GetSocketDescriptor(),
+		prot:   prot,
+		local:  res.ProxyExternalIp,
+		remote: req.RemoteIp,
+	}, nil
+}
+
+// LookupIP returns the given host's IP addresses.
+func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) {
+	packedAddrs, _, err := resolve(ctx, ipFamilies, host)
+	if err != nil {
+		return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err)
+	}
+	addrs = make([]net.IP, len(packedAddrs))
+	for i, pa := range packedAddrs {
+		addrs[i] = net.IP(pa)
+	}
+	return addrs, nil
+}
+
+func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) {
+	// Check if it's an IP address.
+ if ip := net.ParseIP(host); ip != nil { + if ip := ip.To4(); ip != nil { + return [][]byte{ip}, false, nil + } + return [][]byte{ip}, false, nil + } + + req := &pb.ResolveRequest{ + Name: &host, + AddressFamilies: fams, + } + res := &pb.ResolveReply{} + if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { + // XXX: need to map to pb.ResolveReply_ErrorCode? + return nil, false, err + } + return res.PackedAddress, true, nil +} + +// withDeadline is like context.WithDeadline, except it ignores the zero deadline. +func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { + if deadline.IsZero() { + return parent, func() {} + } + return context.WithDeadline(parent, deadline) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + ctx context.Context + desc string + offset int64 + + prot pb.CreateSocketRequest_SocketProtocol + local, remote *pb.AddressPort + + readDeadline, writeDeadline time.Time // optional +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. +func (cn *Conn) SetContext(ctx context.Context) { + cn.ctx = ctx +} + +func (cn *Conn) Read(b []byte) (n int, err error) { + const maxRead = 1 << 20 + if len(b) > maxRead { + b = b[:maxRead] + } + + req := &pb.ReceiveRequest{ + SocketDescriptor: &cn.desc, + DataSize: proto.Int32(int32(len(b))), + } + res := &pb.ReceiveReply{} + if !cn.readDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) + defer cancel() + if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { + return 0, err + } + if len(res.Data) == 0 { + return 0, io.EOF + } + if len(res.Data) > len(b) { + return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) + } + return copy(b, res.Data), nil +} + +func (cn *Conn) Write(b []byte) (n int, err error) { + const lim = 1 << 20 // max per chunk + + for n < len(b) { + chunk := b[n:] + if len(chunk) > lim { + chunk = chunk[:lim] + } + + req := &pb.SendRequest{ + SocketDescriptor: &cn.desc, + Data: chunk, + StreamOffset: &cn.offset, + } + res := &pb.SendReply{} + if !cn.writeDeadline.IsZero() { + req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) + } + ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) + defer cancel() + if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { + // assume zero bytes were sent in this RPC + break + } + n += int(res.GetDataSent()) + cn.offset += int64(res.GetDataSent()) + } + + return +} + +func (cn *Conn) Close() error { + req := &pb.CloseRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.CloseReply{} + if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { + return err + } + cn.desc = "CLOSED" + return nil +} + +func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { + if ap == nil { + return nil + } + switch prot { + case pb.CreateSocketRequest_TCP: + return &net.TCPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + case pb.CreateSocketRequest_UDP: + return &net.UDPAddr{ + IP: net.IP(ap.PackedAddress), + Port: int(*ap.Port), + } + } + panic("unknown protocol " + prot.String()) +} + +func 
(cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } +func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } + +func (cn *Conn) SetDeadline(t time.Time) error { + cn.readDeadline = t + cn.writeDeadline = t + return nil +} + +func (cn *Conn) SetReadDeadline(t time.Time) error { + cn.readDeadline = t + return nil +} + +func (cn *Conn) SetWriteDeadline(t time.Time) error { + cn.writeDeadline = t + return nil +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + req := &pb.GetSocketNameRequest{ + SocketDescriptor: &cn.desc, + } + res := &pb.GetSocketNameReply{} + return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) +} + +func init() { + internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go new file mode 100644 index 00000000..c804169a --- /dev/null +++ b/vendor/google.golang.org/appengine/socket/socket_vm.go @@ -0,0 +1,64 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package socket + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// Dial connects to the address addr on the network protocol. +// The address format is host:port, where host may be a hostname or an IP address. +// Known protocols are "tcp" and "udp". +// The returned connection satisfies net.Conn, and is valid while ctx is valid; +// if the connection is to be used after ctx becomes invalid, invoke SetContext +// with the new context. +func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { + conn, err := net.Dial(protocol, addr) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// DialTimeout is like Dial but takes a timeout. +// The timeout includes name resolution, if required. +func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { + conn, err := net.DialTimeout(protocol, addr, timeout) + if err != nil { + return nil, err + } + return &Conn{conn}, nil +} + +// LookupIP returns the given host's IP addresses. +func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { + return net.LookupIP(host) +} + +// Conn represents a socket connection. +// It implements net.Conn. +type Conn struct { + net.Conn +} + +// SetContext sets the context that is used by this Conn. +// It is usually used only when using a Conn that was created in a different context, +// such as when a connection is created during a warmup request but used while +// servicing a user request. +func (cn *Conn) SetContext(ctx context.Context) { + // This function is not required in App Engine "flexible environment". +} + +// KeepAlive signals that the connection is still in use. +// It may be called to prevent the socket being closed due to inactivity. +func (cn *Conn) KeepAlive() error { + // This function is not required in App Engine "flexible environment". 
+ return nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 3a47b902..cc5d52fb 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/code.proto package code @@ -44,7 +44,7 @@ const ( type Code int32 const ( - // Not an error; returned on success + // Not an error; returned on success. // // HTTP Mapping: 200 OK Code_OK Code = 0 @@ -78,7 +78,7 @@ const ( // Some requested entity (e.g., file or directory) was not found. // // Note to server developers: if a request is denied for an entire class - // of users, such as gradual feature rollout or undocumented whitelist, + // of users, such as gradual feature rollout or undocumented allowlist, // `NOT_FOUND` may be used. If a request is denied for some users within // a class of users, such as user-based access control, `PERMISSION_DENIED` // must be used. @@ -118,15 +118,16 @@ const ( // // Service implementors can use the following guidelines to decide // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: - // (a) Use `UNAVAILABLE` if the client can retry just the failing call. - // (b) Use `ABORTED` if the client should retry at a higher level - // (e.g., when a client-specified test-and-set fails, indicating the - // client should restart a read-modify-write sequence). - // (c) Use `FAILED_PRECONDITION` if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, `FAILED_PRECONDITION` - // should be returned since the client should not retry unless - // the files are deleted from the directory. + // + // (a) Use `UNAVAILABLE` if the client can retry just the failing call. + // (b) Use `ABORTED` if the client should retry at a higher level. For + // example, when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence. + // (c) Use `FAILED_PRECONDITION` if the client should not retry until + // the system state has been explicitly fixed. For example, if an "rmdir" + // fails because the directory is non-empty, `FAILED_PRECONDITION` + // should be returned since the client should not retry unless + // the files are deleted from the directory. 
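The (a)/(b)/(c) guidance above maps naturally onto server-side error handling. A hedged sketch using the grpc `status` and `codes` packages, with the booleans as illustrative stand-ins for real checks:

```go
package example

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func rmdir(dir string, notEmpty, lostRace, overloaded bool) error {
	switch {
	case notEmpty: // (c) caller must fix system state first; a bare retry cannot help
		return status.Errorf(codes.FailedPrecondition, "rmdir %q: directory not empty", dir)
	case lostRace: // (b) caller should restart the read-modify-write sequence
		return status.Errorf(codes.Aborted, "rmdir %q: concurrent modification", dir)
	case overloaded: // (a) caller can retry just this call
		return status.Errorf(codes.Unavailable, "rmdir %q: try again", dir)
	}
	return nil
}
```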
// // HTTP Mapping: 400 Bad Request Code_FAILED_PRECONDITION Code = 9 diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 2f3ab924..7bd161e4 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/error_details.proto package errdetails @@ -36,6 +36,112 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +type ErrorInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match a + // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents + // UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // instances that can be created in a single (batch) request. 
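A hedged sketch of producing and consuming an ErrorInfo like the one documented above, via google.golang.org/grpc/status; the server-side helper and client-side extractor names are illustrative:

```go
package example

import (
	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// apiDisabledErr builds the "API_DISABLED" example from the doc comment.
func apiDisabledErr() error {
	st := status.New(codes.FailedPrecondition, "pubsub.googleapis.com is not enabled")
	st, err := st.WithDetails(&errdetails.ErrorInfo{
		Reason: "API_DISABLED",
		Domain: "googleapis.com",
		Metadata: map[string]string{
			"resource": "projects/123",
			"service":  "pubsub.googleapis.com",
		},
	})
	if err != nil {
		return status.Error(codes.Internal, "attaching details failed")
	}
	return st.Err()
}

// reasonOf walks the status details looking for an ErrorInfo reason.
func reasonOf(err error) string {
	st, ok := status.FromError(err)
	if !ok {
		return ""
	}
	for _, d := range st.Details() {
		if info, ok := d.(*errdetails.ErrorInfo); ok {
			return info.Reason
		}
	}
	return ""
}
```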
+ Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ErrorInfo) Reset() { + *x = ErrorInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorInfo) ProtoMessage() {} + +func (x *ErrorInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. +func (*ErrorInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} +} + +func (x *ErrorInfo) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *ErrorInfo) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ErrorInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + // Describes when the clients can retry a failed request. Clients could ignore // the recommendation here or retry when this information is missing from error // responses. @@ -61,7 +167,7 @@ type RetryInfo struct { func (x *RetryInfo) Reset() { *x = RetryInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[0] + mi := &file_google_rpc_error_details_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -74,7 +180,7 @@ func (x *RetryInfo) String() string { func (*RetryInfo) ProtoMessage() {} func (x *RetryInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[0] + mi := &file_google_rpc_error_details_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -87,7 +193,7 @@ func (x *RetryInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead. func (*RetryInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} } func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { @@ -112,7 +218,7 @@ type DebugInfo struct { func (x *DebugInfo) Reset() { *x = DebugInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[1] + mi := &file_google_rpc_error_details_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -125,7 +231,7 @@ func (x *DebugInfo) String() string { func (*DebugInfo) ProtoMessage() {} func (x *DebugInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[1] + mi := &file_google_rpc_error_details_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -138,7 +244,7 @@ func (x *DebugInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead. 
func (*DebugInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} } func (x *DebugInfo) GetStackEntries() []string { @@ -178,7 +284,7 @@ type QuotaFailure struct { func (x *QuotaFailure) Reset() { *x = QuotaFailure{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[2] + mi := &file_google_rpc_error_details_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -191,7 +297,7 @@ func (x *QuotaFailure) String() string { func (*QuotaFailure) ProtoMessage() {} func (x *QuotaFailure) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[2] + mi := &file_google_rpc_error_details_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -204,7 +310,7 @@ func (x *QuotaFailure) ProtoReflect() protoreflect.Message { // Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead. func (*QuotaFailure) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} } func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { @@ -214,111 +320,6 @@ func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { return nil } -// Describes the cause of the error with structured details. -// -// Example of an error when contacting the "pubsub.googleapis.com" API when it -// is not enabled: -// -// { "reason": "API_DISABLED" -// "domain": "googleapis.com" -// "metadata": { -// "resource": "projects/123", -// "service": "pubsub.googleapis.com" -// } -// } -// -// This response indicates that the pubsub.googleapis.com API is not enabled. -// -// Example of an error that is returned when attempting to create a Spanner -// instance in a region that is out of stock: -// -// { "reason": "STOCKOUT" -// "domain": "spanner.googleapis.com", -// "metadata": { -// "availableRegions": "us-central1,us-east2" -// } -// } -type ErrorInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The reason of the error. This is a constant value that identifies the - // proximate cause of the error. Error reasons are unique within a particular - // domain of errors. This should be at most 63 characters and match - // /[A-Z0-9_]+/. - Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` - // The logical grouping to which the "reason" belongs. The error domain - // is typically the registered service name of the tool or product that - // generates the error. Example: "pubsub.googleapis.com". If the error is - // generated by some common infrastructure, the error domain must be a - // globally unique value that identifies the infrastructure. For Google API - // infrastructure, the error domain is "googleapis.com". - Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` - // Additional structured details about this error. - // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in - // length. When identifying the current value of an exceeded limit, the units - // should be contained in the key, not the value. 
For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of - // instances that can be created in a single (batch) request. - Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *ErrorInfo) Reset() { - *x = ErrorInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ErrorInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErrorInfo) ProtoMessage() {} - -func (x *ErrorInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. -func (*ErrorInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} -} - -func (x *ErrorInfo) GetReason() string { - if x != nil { - return x.Reason - } - return "" -} - -func (x *ErrorInfo) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *ErrorInfo) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - // Describes what preconditions have failed. // // For example, if an RPC failed because it required the Terms of Service to be @@ -495,7 +496,8 @@ type ResourceInfo struct { ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` // The name of the resource being accessed. For example, a shared calendar // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` // The owner of the resource (optional). 
// For example, "user:" or "project: google.protobuf.Duration - 10, // 1: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 11, // 2: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link @@ -1089,7 +1125,7 @@ func file_google_rpc_error_details_proto_init() { } if !protoimpl.UnsafeEnabled { file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetryInfo); i { + switch v := v.(*ErrorInfo); i { case 0: return &v.state case 1: @@ -1101,7 +1137,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DebugInfo); i { + switch v := v.(*RetryInfo); i { case 0: return &v.state case 1: @@ -1113,7 +1149,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QuotaFailure); i { + switch v := v.(*DebugInfo); i { case 0: return &v.state case 1: @@ -1125,7 +1161,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorInfo); i { + switch v := v.(*QuotaFailure); i { case 0: return &v.state case 1: @@ -1208,7 +1244,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QuotaFailure_Violation); i { case 0: return &v.state diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index f34a38e4..a6b50818 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/status.proto package status @@ -48,11 +48,13 @@ type Status struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. 
	// user-facing error message should be localized and sent in the
-	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized
+	// by the client.
 	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
 	// A list of messages that carry the error details. There is a common set of
 	// message types for APIs to use.
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 52338d00..608aa6e1 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -20,6 +20,15 @@ How to get your contributions merged smoothly and quickly.
   both author's & reviewer's time is wasted. Create more PRs to address
   different concerns and everyone will be happy.
 
+- If you are searching for features to work on, issues labeled [Status: Help
+  Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
+  are a great place to start. These issues are well-documented and usually can be
+  resolved with a single pull request.
+
+- If you are adding a new file, make sure it has the copyright message template
+  at the top as a comment. You can copy over the message from an existing file
+  and update the year.
+
 - The grpc package should only depend on standard Go packages and a small
   number of exceptions. If your contribution introduces new dependencies which
   are NOT in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
@@ -32,14 +41,18 @@ How to get your contributions merged smoothly and quickly.
 - Provide a good **PR description** as a record of **what** change is being made
   and **why** it was made. Link to a github issue if it exists.
 
-- Don't fix code style and formatting unless you are already changing that line
-  to address an issue. PRs with irrelevant changes won't be merged. If you do
-  want to fix formatting or style, do that in a separate PR.
+- If you want to fix formatting or style, consider whether your changes are an
+  obvious improvement or might be considered a personal preference. If a style
+  change is based on preference, it likely will not be accepted. If it corrects
+  widely agreed-upon anti-patterns, then please do create a PR and explain the
+  benefits of the change.
 
 - Unless your PR is trivial, you should expect there will be reviewer comments
-  that you'll need to address before merging. We expect you to be reasonably
-  responsive to those comments, otherwise the PR will be closed after 2-3 weeks
-  of inactivity.
+  that you'll need to address before merging. We'll mark it as `Status: Requires
+  Reporter Clarification` if we expect you to respond to these comments in a
+  timely manner. If the PR remains inactive for 6 days, it will be marked as
+  `stale` and automatically close 7 days after that if we don't hear back from
+  you.
 
 - Maintain **clean commit history** and use **meaningful commit messages**. PRs with
   messy commit history are difficult to review and won't be merged. Use
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index 0e6ae69a..1bc92248 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first.
For more information see the ## Installation -With [Go module][] support (Go 1.11+), simply add the following import +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + ```go import "google.golang.org/grpc" ``` -to your code, and then `go [build|run|test]` will automatically fetch the -necessary dependencies. - -Otherwise, to install the `grpc-go` package, run the following command: - -```console -$ go get -u google.golang.org/grpc -``` - > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -56,15 +49,6 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ```sh - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -76,33 +60,13 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). ### Compiling error, undefined: grpc.SupportPackageIsVersion -#### If you are using Go modules: - -Ensure your gRPC-Go version is `require`d at the appropriate version in -the same module containing the generated `.pb.go` files. For example, -`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: - -```go -module - -require ( - google.golang.org/grpc v1.27.0 -) -``` - -#### If you are *not* using Go modules: - -Update the `proto` package, gRPC package, and rebuild the `.proto` files: - -```sh -go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -go get -u google.golang.org/grpc -protoc --go_out=plugins=grpc:. *.proto -``` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. ### How to turn on logging @@ -121,9 +85,11 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have configured - your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. 
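A minimal sketch of the server keepalive tuning that bullet describes; the durations are illustrative, not recommendations:

```go
package example

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func newServer() *grpc.Server {
	return grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		// Regularly recycle connections, e.g. to re-trigger DNS lookups.
		MaxConnectionAge: 30 * time.Minute,
		// Raise the grace period so long-running RPCs can finish before the
		// aged-out connection is forcibly closed.
		MaxConnectionAgeGrace: 5 * time.Minute,
	}))
}
```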
It can be tricky to debug this because the error happens on the client side but diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 02f5dc53..712fef4d 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -25,30 +25,35 @@ // later release. package attributes +import ( + "fmt" + "strings" +) + // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// interface{}) bool', it will be called by (*Attributes).Equal to determine -// whether two values with the same key should be considered equal. +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. type Attributes struct { - m map[interface{}]interface{} + m map[any]any } // New returns a new Attributes containing the key/value pair. -func New(key, value interface{}) *Attributes { - return &Attributes{m: map[interface{}]interface{}{key: value}} +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. -func (a *Attributes) WithValue(key, value interface{}) *Attributes { +func (a *Attributes) WithValue(key, value any) *Attributes { if a == nil { return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + n := &Attributes{m: make(map[any]any, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } @@ -58,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key interface{}) interface{} { +func (a *Attributes) Value(key any) any { if a == nil { return nil } return a.m[key] } -// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) -// bool' is implemented for a value in the attributes, it is called to -// determine if the value matches the one stored in the other attributes. If -// Equal is not implemented, standard equality is used to determine if the two -// values are equal. Note that some types (e.g. maps) aren't comparable by -// default, so they must be wrapped in a struct, or in an alias type, with Equal -// defined. +// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. 
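A short sketch of the Attributes API documented above; `regionKey` is a user-defined key type, as the package recommends:

```go
package example

import "google.golang.org/grpc/attributes"

type regionKey struct{} // users define their own hashable key types

func regions() (string, string) {
	a := attributes.New(regionKey{}, "us-east1")
	b := a.WithValue(regionKey{}, "us-west1") // last write wins; a is unchanged

	return a.Value(regionKey{}).(string), b.Value(regionKey{}).(string)
}
```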
 func (a *Attributes) Equal(o *Attributes) bool {
 	if a == nil && o == nil {
 		return true
@@ -88,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool {
 			// o missing element of a
 			return false
 		}
-		if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok {
+		if eq, ok := v.(interface{ Equal(o any) bool }); ok {
 			if !eq.Equal(ov) {
 				return false
 			}
@@ -99,3 +103,39 @@ func (a *Attributes) Equal(o *Attributes) bool {
 	}
 	return true
 }
+
+// String prints the attribute map. If any keys or values throughout the map
+// implement fmt.Stringer, it calls that method and appends.
+func (a *Attributes) String() string {
+	var sb strings.Builder
+	sb.WriteString("{")
+	first := true
+	for k, v := range a.m {
+		if !first {
+			sb.WriteString(", ")
+		}
+		sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
+		first = false
+	}
+	sb.WriteString("}")
+	return sb.String()
+}
+
+func str(x any) string {
+	if v, ok := x.(fmt.Stringer); ok {
+		return v.String()
+	} else if v, ok := x.(string); ok {
+		return v
+	}
+	return fmt.Sprintf("<%p>", x)
+}
+
+// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
+// the Attributes correctly when printing (via pretty.JSON) structs containing
+// Attributes as fields.
+//
+// It is impossible to unmarshal attributes from a JSON representation and this
+// method is meant only for debugging purposes.
+func (a *Attributes) MarshalJSON() ([]byte, error) {
+	return []byte(a.String()), nil
+}
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index 392b21fb..b6377f44 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -105,8 +105,8 @@ type SubConn interface {
 	//
 	// This will trigger a state transition for the SubConn.
 	//
-	// Deprecated: This method is now part of the ClientConn interface and will
-	// eventually be removed from here.
+	// Deprecated: this method will be removed. Create new SubConns for new
+	// addresses instead.
 	UpdateAddresses([]resolver.Address)
 	// Connect starts the connecting for this SubConn.
 	Connect()
@@ -115,6 +115,13 @@ type SubConn interface {
 	// creates a new one and returns it. Returns a close function which must
 	// be called when the Producer is no longer needed.
 	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
+	// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
+	// allowed to complete. No future calls should be made on the SubConn.
+	// One final state update will be delivered to the StateListener (or
+	// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
+	// indicate the shutdown operation. This may be delivered before
+	// in-progress RPCs are complete and the actual connection is closed.
+	Shutdown()
 }
 
 // NewSubConnOptions contains options to create new SubConn.
@@ -129,6 +136,11 @@ type NewSubConnOptions struct {
 	// HealthCheckEnabled indicates whether health check service should be
 	// enabled on this SubConn
 	HealthCheckEnabled bool
+	// StateListener is called when the state of the subconn changes. If nil,
+	// Balancer.UpdateSubConnState will be called instead. Will never be
+	// invoked until after Connect() is called on the SubConn created with
+	// these options.
+	StateListener func(SubConnState)
 }
 
 // State contains the balancer's state relevant to the gRPC ClientConn.
@@ -150,16 +162,24 @@ type ClientConn interface {
 	// NewSubConn is called by balancer to create a new SubConn.
// It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. // gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This will trigger a state transition for the SubConn. + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -250,7 +270,7 @@ type DoneInfo struct { // trailing metadata. // // The only supported type now is *orca_v3.LoadReport. - ServerLoad interface{} + ServerLoad any } var ( @@ -279,6 +299,14 @@ type PickResult struct { // type, Done may not be called. May be nil if the balancer does not wish // to be notified when the RPC completes. Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metadata metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and @@ -335,9 +363,13 @@ type Balancer interface { ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } @@ -382,15 +414,14 @@ var ErrBadResolverState = errors.New("bad resolver state") type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as interface{} to avoid a - // dependency cycle. Should also return a close function that will be - // called when all references to the Producer have been given up. - Build(grpcClientConnInterface interface{}) (p Producer, close func()) + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } // A Producer is a type shared among potentially many consumers. It is // associated with a SubConn, and an implementation will typically contain // other methods to provide additional functionality, e.g. 
configuration or // subscription registration. -type Producer interface { -} +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 3929c26d..a7f1eeec 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - b.cc.RemoveSubConn(sc) + sc.Shutdown() b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. 
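For third-party balancers, the migration mirrors what the base balancer does above. A hedged sketch for a hypothetical custom policy follows; myBalancer and its methods are illustrative, not grpc-go API:

package mylb

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// myBalancer is a hypothetical custom LB policy; only the SubConn
// lifecycle handling is sketched here.
type myBalancer struct {
	cc balancer.ClientConn
}

// addConn creates a SubConn using the new StateListener option.
func (b *myBalancer) addConn(addr resolver.Address) (balancer.SubConn, error) {
	// Declare sc before the options so the listener closure can capture it;
	// the listener is never invoked before Connect(), by which time sc is set.
	var sc balancer.SubConn
	opts := balancer.NewSubConnOptions{
		StateListener: func(scs balancer.SubConnState) { b.onStateChange(sc, scs) },
	}
	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, opts)
	if err != nil {
		return nil, err
	}
	sc.Connect()
	return sc, nil
}

// removeConn prefers SubConn.Shutdown over the deprecated
// ClientConn.RemoveSubConn; a final Shutdown state update still reaches
// the StateListener.
func (b *myBalancer) removeConn(sc balancer.SubConn) {
	sc.Shutdown()
}

// onStateChange replaces the deprecated Balancer.UpdateSubConnState hook.
func (b *myBalancer) onStateChange(sc balancer.SubConn, scs balancer.SubConnState) {
	// React to connectivity changes and regenerate the picker here.
}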
func (b *baseBalancer) Close() { } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 0359956d..a4411c22 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -25,14 +25,20 @@ import ( + "context" + "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" +) + +type ccbMode int + +const ( + ccbModeActive ccbMode = iota + ccbModeIdle + ccbModeClosed + ccbModeExitingIdle ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -49,192 +55,89 @@ import ( // It uses the gracefulswitch.Balancer internally to ensure that balancer // switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn - - // Since these fields are accessed only from handleXxx() methods which are - // synchronized by the watcher goroutine, we do not need a mutex to protect - // these fields. + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + + // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled in the serializer. Fields + // accessed *only* in these serializer callbacks can therefore be accessed + // without a mutex. balancer *gracefulswitch.Balancer curBalancerName string - updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). - resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. - closed *grpcsync.Event // Indicates if close has been called. - done *grpcsync.Event // Indicates if close has completed its work. + // mu guards access to the below fields. Access to the serializer and its + // cancel function needs to be mutex protected because they are overwritten + // when the wrapper exits idle mode. + mu sync.Mutex + serializer *grpcsync.CallbackSerializer // To serialize all outgoing calls. + serializerCancel context.CancelFunc // To close the serializer at close/enterIdle time. + mode ccbMode // Tracks the current mode of the wrapper. } // newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer // is not created until the switchTo() method is invoked. func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(context.Background()) ccb := &ccBalancerWrapper{ - cc: cc, - updateCh: buffer.NewUnbounded(), - resultCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), + cc: cc, + opts: bopts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } - go ccb.watcher() ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// The following xxxUpdate structs wrap the arguments received as part of the -// corresponding update. The watcher goroutine uses the 'type' of the update to -// invoke the appropriate handler routine to handle the update.
- -type ccStateUpdate struct { - ccs *balancer.ClientConnState -} - -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -type exitIdleUpdate struct{} - -type resolverErrorUpdate struct { - err error -} - -type switchToUpdate struct { - name string -} - -type subConnUpdate struct { - acbw *acBalancerWrapper -} - -// watcher is a long-running goroutine which reads updates from a channel and -// invokes corresponding methods on the underlying balancer. It ensures that -// these methods are invoked in a synchronous fashion. It also ensures that -// these methods are invoked in the order in which the updates were received. -func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case u := <-ccb.updateCh.Get(): - ccb.updateCh.Load() - if ccb.closed.HasFired() { - break - } - switch update := u.(type) { - case *ccStateUpdate: - ccb.handleClientConnStateChange(update.ccs) - case *scStateUpdate: - ccb.handleSubConnStateChange(update) - case *exitIdleUpdate: - ccb.handleExitIdle() - case *resolverErrorUpdate: - ccb.handleResolverError(update.err) - case *switchToUpdate: - ccb.handleSwitchTo(update.name) - case *subConnUpdate: - ccb.handleRemoveSubConn(update.acbw) - default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) - } - case <-ccb.closed.Done(): - } - - if ccb.closed.HasFired() { - ccb.handleClose() - return - } - } -} - // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. -// -// Unlike other methods invoked by grpc to push updates to the underlying -// balancer, this method cannot simply push the update onto the update channel -// and return. It needs to return the error returned by the underlying balancer -// back to grpc which propagates that to the resolver. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) - - var res interface{} - select { - case res = <-ccb.resultCh.Get(): - ccb.resultCh.Load() - case <-ccb.closed.Done(): - // Return early if the balancer wrapper is closed while we are waiting for - // the underlying balancer to process a ClientConnState update. - return nil - } - // If the returned error is nil, attempting to type assert to error leads to - // panic. So, this needs to handled separately. - if res == nil { - return nil + ccb.mu.Lock() + errCh := make(chan error, 1) + // Here and everywhere else where Schedule() is called, it is done with the + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. + ok := ccb.serializer.Schedule(func(_ context.Context) { + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { + // If we are unable to schedule a function with the serializer, it + // indicates that it has been closed. A serializer is only closed when + // the wrapper is closed or is in idle. + ccb.mu.Unlock() + return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") } - return res.(error) -} - -// handleClientConnStateChange handles a ClientConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. -// -// If the addresses specified in the update contain addresses of type "grpclb" -// and the selected LB policy is not "grpclb", these addresses will be filtered -// out and ccs will be modified with the updated address list. 
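The watcher goroutine and per-update structs being deleted here are replaced by grpc-go's internal grpcsync.CallbackSerializer, which is not exported. The following stand-alone sketch only approximates its shape, under that assumption: callbacks scheduled from many goroutines run one at a time in FIFO order, Schedule never blocks, and it reports false once the serializer shuts down.

package main

import (
	"context"
	"fmt"
	"sync"
)

// serializer is a toy stand-in for grpc-go's internal CallbackSerializer.
// Schedule enqueues a callback; a single goroutine drains the queue, so
// scheduled callbacks never run concurrently with each other.
type serializer struct {
	mu     sync.Mutex
	queue  []func(context.Context)
	notify chan struct{} // capacity-1 wakeup token for the drainer
	done   chan struct{} // closed when the drainer goroutine exits
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{notify: make(chan struct{}, 1), done: make(chan struct{})}
	go s.run(ctx)
	return s
}

// Schedule enqueues f without blocking. It returns false once the
// serializer's context is canceled, mirroring how the wrapper detects a
// closed or idle balancer; callbacks enqueued near shutdown may not run.
func (s *serializer) Schedule(f func(context.Context)) bool {
	select {
	case <-s.done:
		return false
	default:
	}
	s.mu.Lock()
	s.queue = append(s.queue, f)
	s.mu.Unlock()
	select {
	case s.notify <- struct{}{}: // wake the drainer if it is idle
	default: // a wakeup is already pending
	}
	return true
}

func (s *serializer) run(ctx context.Context) {
	defer close(s.done)
	for {
		select {
		case <-ctx.Done():
			return
		case <-s.notify:
		}
		for {
			s.mu.Lock()
			if len(s.queue) == 0 {
				s.mu.Unlock()
				break
			}
			f := s.queue[0]
			s.queue = s.queue[1:]
			s.mu.Unlock()
			f(ctx) // run outside the lock, like the real serializer
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s := newSerializer(ctx)
	done := make(chan struct{})
	s.Schedule(func(context.Context) { fmt.Println("runs first") })
	s.Schedule(func(context.Context) { fmt.Println("runs second"); close(done) })
	<-done
}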
-func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { - if ccb.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - ccs.ResolverState.Addresses = addrs + ccb.mu.Unlock() + + // We get here only if the above call to Schedule succeeds, in which case it + // is guaranteed that the scheduled function will run. Therefore it is safe + // to block on this channel. + err := <-errCh + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) } - ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) + return err } // updateSubConnState is invoked by grpc to push a subConn state update to the // underlying balancer. func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. - if sc == nil { - return - } - ccb.updateCh.Put(&scStateUpdate{ - sc: sc, - state: s, - err: err, + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) -} - -// handleSubConnStateChange handles a SubConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. -func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { - ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) -} - -func (ccb *ccBalancerWrapper) exitIdle() { - ccb.updateCh.Put(&exitIdleUpdate{}) -} - -func (ccb *ccBalancerWrapper) handleExitIdle() { - if ccb.cc.GetState() != connectivity.Idle { - return - } - ccb.balancer.ExitIdle() + ccb.mu.Unlock() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.updateCh.Put(&resolverErrorUpdate{err: err}) -} - -func (ccb *ccBalancerWrapper) handleResolverError(err error) { - ccb.balancer.ResolverError(err) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ResolverError(err) + }) + ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -248,24 +151,27 @@ func (ccb *ccBalancerWrapper) handleResolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.updateCh.Put(&switchToUpdate{name: name}) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. 
+ if strings.EqualFold(ccb.curBalancerName, name) { + return + } + ccb.buildLoadBalancingPolicy(name) + }) + ccb.mu.Unlock() } -// handleSwitchTo handles a balancer switch update from the update channel. It -// calls the SwitchTo() method on the gracefulswitch.Balancer with a -// balancer.Builder corresponding to name. If no balancer.Builder is registered -// for the given name, it uses the default LB policy which is "pick_first". -func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { - // TODO: Other languages use case-insensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - - // TODO: Ensure that name is a registered LB policy when we get here. - // We currently only validate the `loadBalancingConfig` field. We need to do - // the same for the `loadBalancingPolicy` field and reject the service config - // if the specified policy is not registered. +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. +// - update the `curBalancerName` field +// +// Must be called from a serializer callback. +func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { builder := balancer.Get(name) if builder == nil { channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) @@ -281,26 +187,112 @@ func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { ccb.curBalancerName = builder.Name() } -// handleRemoveSucConn handles a request from the underlying balancer to remove -// a subConn. -// -// See comments in RemoveSubConn() for more details. -func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +func (ccb *ccBalancerWrapper) close() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.closeBalancer(ccbModeClosed) } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// enterIdleMode is invoked by grpc when the channel enters idle mode upon +// expiry of idle_timeout. This call blocks until the balancer is closed. +func (ccb *ccBalancerWrapper) enterIdleMode() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") + ccb.closeBalancer(ccbModeIdle) +} + +// closeBalancer is invoked when the channel is being closed or when it enters +// idle mode upon expiry of idle_timeout. +func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { + ccb.mu.Unlock() + return + } + + ccb.mode = m + done := ccb.serializer.Done() + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent + // to the balancer. + ccb.serializerCancel() + // Empty the current balancer name because we don't have a balancer + // anymore and also so that we act on the next call to switchTo by + // creating a new balancer specified by the new resolver. 
+ ccb.curBalancerName = "" + }) + if !ok { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + + // Give enqueued callbacks a chance to finish before closing the balancer. + <-done + b.Close() } -func (ccb *ccBalancerWrapper) handleClose() { - ccb.balancer.Close() - ccb.done.Fire() +// exitIdleMode is invoked by grpc when the channel exits idle mode either +// because of an RPC or because of an invocation of the Connect() API. This +// recreates the balancer that was closed previously when entering idle mode. +// +// If the channel is not in idle mode, we know for a fact that we are here as a +// result of the user calling the Connect() method on the ClientConn. In this +// case, we can simply forward the call to the underlying balancer, instructing +// it to reconnect to the backends. +func (ccb *ccBalancerWrapper) exitIdleMode() { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed { + // Request to exit idle is a no-op when wrapper is already closed. + ccb.mu.Unlock() + return + } + + if ccb.mode == ccbModeIdle { + // Recreate the serializer which was closed when we entered idle. + ctx, cancel := context.WithCancel(context.Background()) + ccb.serializer = grpcsync.NewCallbackSerializer(ctx) + ccb.serializerCancel = cancel + } + + // The ClientConn guarantees mutual exclusion between close() and + // exitIdleMode(), and since we just created a new serializer, we can be + // sure that the below function will be scheduled. + done := make(chan struct{}) + ccb.serializer.Schedule(func(_ context.Context) { + defer close(done) + + ccb.mu.Lock() + defer ccb.mu.Unlock() + + if ccb.mode != ccbModeIdle { + ccb.balancer.ExitIdle() + return + } + + // Gracefulswitch balancer does not support a switchTo operation after + // being closed. Hence we need to create a new one here. + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + ccb.mode = ccbModeActive + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") + + }) + ccb.mu.Unlock() + + <-done +} + +func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { + ccb.mu.Lock() + defer ccb.mu.Unlock() + return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) <= 0 { + if ccb.isIdleOrClosed() { + return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + } + + if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } ac, err := ccb.cc.newAddrConn(addrs, opts) @@ -308,32 +300,26 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} - acbw.ac.mu.Lock() + acbw := &acBalancerWrapper{ + ccb: ccb, + ac: ac, + producers: make(map[balancer.ProducerBuilder]*refCountedProducer), + stateListener: opts.StateListener, + } ac.acbw = acbw - acbw.ac.mu.Unlock() return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it - // was required to handle the RemoveSubConn() method asynchronously by pushing - // the update onto the update channel.
This was done to avoid a deadlock as - // switchBalancer() was holding cc.mu when calling Close() on the old - // balancer, which would in turn call RemoveSubConn(). - // - // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this - // asynchronously is probably not required anymore since the switchTo() method - // handles the balancer switch by pushing the update onto the channel. - // TODO(easwars): Handle this inline. - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) + // The graceful switch balancer will never call this. + logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + if ccb.isIdleOrClosed() { + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -342,6 +328,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + if ccb.isIdleOrClosed() { + return + } + // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is @@ -352,6 +342,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + if ccb.isIdleOrClosed() { + return + } + ccb.cc.resolveNow(o) } @@ -362,78 +356,57 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements the balancer.SubConn interface. type acBalancerWrapper struct { + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) + mu sync.Mutex - ac *addrConn producers map[balancer.ProducerBuilder]*refCountedProducer } -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - return - } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition.
- acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - - if acState == connectivity.Shutdown { - return - } +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) +} - newAC, err := cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = newAC - newAC.mu.Lock() - newAC.acbw = acbw - newAC.mu.Unlock() - if acState != connectivity.Idle { - go newAC.connect() - } - } +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) } func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() go acbw.ac.connect() } -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { - acbw.mu.Lock() - defer acbw.mu.Unlock() - return acbw.ac -} +func (acbw *acBalancerWrapper) Shutdown() { + ccb := acbw.ccb + if ccb.isIdleOrClosed() { + // It is safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. + // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } -var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} // NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, returns errSubConnNotReady. +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport := acbw.ac.getReadyTransport() - if transport == nil { - return nil, errSubConnNotReady + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } // Invoke performs a unary RPC. If the addrConn is not ready, blocks until it // is or ctx expires. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) if err != nil { return err diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 64a232f2..59548011 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 +// protoc-gen-go v1.31.0 +// protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 9e20e4d3..788c89c1 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -26,7 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -56,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) 
if err != nil { return err diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 422639c7..ff7fea10 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,7 +24,6 @@ import ( "fmt" "math" "net/url" - "reflect" "strings" "sync" "sync/atomic" @@ -35,9 +34,12 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -54,8 +56,6 @@ import ( const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -69,6 +69,9 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates that the connection is being closed as the channel + // is moving to idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -134,20 +137,41 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), + target: target, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), } + + // We start the channel off in idle mode, but kick it out of idle at the end + // of this method, instead of waiting for the first RPC. Other gRPC + // implementations do wait for the first RPC to kick the channel out of + // idle. But doing so would be a major behavior change for our users who are + // used to seeing the channel active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, if we ever get to do that.
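The idle-mode machinery introduced in this file is driven by a dial option. Assuming this vendored grpc-go version exposes grpc.WithIdleTimeout (it was added alongside the internal/idle package; the target address below is a placeholder), usage looks roughly like this:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// A channel that enters idle mode after 5 minutes without RPCs;
	// setting the timeout to 0 disables idleness entirely.
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(5*time.Minute),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer cc.Close()

	// Once idle, the channel reports connectivity.Idle; Connect() (or any
	// RPC) kicks it back out, mirroring what DialContext does at creation.
	if cc.GetState() == connectivity.Idle {
		cc.Connect()
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	cc.WaitForStateChange(ctx, connectivity.Idle)
}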
+ cc.idlenessState = ccIdlenessStateIdle + cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + cc.exitIdleCond = sync.NewCond(&cc.mu) - for _, opt := range extraDialOptions { - opt.apply(&cc.dopts) + disableGlobalOpts := false + for _, opt := range opts { + if _, ok := opt.(*disableGlobalDialOptions); ok { + disableGlobalOpts = true + break + } + } + + if !disableGlobalOpts { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } } for _, opt := range opts { @@ -163,40 +187,13 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - pid := cc.dopts.channelzParentID - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) - ted := &channelz.TraceEventDesc{ - Desc: "Channel created", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) - cc.csMgr.channelzID = cc.channelzID + // Register ClientConn with channelz. + cc.channelzRegistration(target) - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { - return nil, errNoTransportCredsInBundle - } - transportCreds := cc.dopts.copts.TransportCredentials - if transportCreds == nil { - transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() - } - if transportCreds.Info().SecurityProtocol == "insecure" { - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + + if err := cc.validateTransportCredentials(); err != nil { + return nil, err } if cc.dopts.defaultServiceConfigRawJSON != nil { @@ -234,35 +231,19 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - scSet := false - if cc.dopts.scChan != nil { - // Try to get an initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - scSet = true - } - default: - } - } if cc.dopts.bs == nil { cc.dopts.bs = backoff.DefaultExponential } // Determine the resolver to use. - resolverBuilder, err := cc.parseTargetAndFindResolver() - if err != nil { + if err := cc.parseTargetAndFindResolver(); err != nil { return nil, err } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) - if err != nil { + if err = cc.determineAuthority(); err != nil { return nil, err } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) - if cc.dopts.scChan != nil && !scSet { + if cc.dopts.scChan != nil { // Blocking wait for the initial service config. select { case sc, ok := <-cc.dopts.scChan: @@ -278,57 +259,234 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } + // This creates the name resolver, load balancer, blocking picker etc. 
+ if err := cc.exitIdleMode(); err != nil { + return nil, err + } + + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. + cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) + + // Return early for non-blocking dials. + if !cc.dopts.block { + return cc, nil + } + + // A blocking dial blocks until the clientConn is ready. + for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } +} + +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. +func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +type idler ClientConn + +func (i *idler) EnterIdleMode() error { + return (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. +func (cc *ClientConn) exitIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { + cc.mu.Unlock() + channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) + return nil + } + + defer func() { + // When Close() and exitIdleMode() race against each other, one of the + // following two can happen: + // - Close() wins the race and runs first. exitIdleMode() runs after, and + // sees that the ClientConn is already closed and hence returns early. + // - exitIdleMode() wins the race and runs first and recreates the balancer + // and releases the lock before recreating the resolver. If Close() runs + // in this window, it will wait for exitIdleMode to complete. + // + // We achieve this synchronization using the below condition variable. 
+ cc.mu.Lock() + cc.idlenessState = ccIdlenessStateActive + cc.exitIdleCond.Signal() + cc.mu.Unlock() + }() + + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { + cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true + } + var credsClone credentials.TransportCredentials if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) + if cc.balancerWrapper == nil { + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + } else { + cc.balancerWrapper.exitIdleMode() + } + cc.firstResolveEvent = grpcsync.NewEvent() + cc.mu.Unlock() - // Build the resolver. - rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + // This needs to be called without cc.mu because it builds a new resolver, + // which might update state or report an error inline; that needs to be + // handled by cc.updateResolverState(), which also grabs cc.mu. + if err := cc.initResolverWrapper(credsClone); err != nil { + return err } + + if exitedIdle { + cc.addTraceEvent("exiting idle mode") + } + return nil +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer and any subchannels. +func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() - cc.resolverWrapper = rWrapper + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { + channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) + cc.mu.Unlock() + return nil + } + + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + conns := cc.conns + cc.conns = make(map[*addrConn]struct{}) + + // TODO: Currently, we close the resolver wrapper upon entering idle mode + // and create a new one upon exiting idle mode. This means that the + // `cc.resolverWrapper` field would be overwritten every time we exit idle + // mode. While this means that we need to hold `cc.mu` when accessing + // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should + // try to do the same for the balancer and picker wrappers too. + cc.resolverWrapper.close() + cc.blockingpicker.enterIdleMode() + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready.
- if cc.dopts.block { - for { - cc.Connect() - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() + go func() { + cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() + return nil +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks a transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned. +func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing } } } + return nil +} - return cc, nil +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. +// A channelz trace event is emitted for ClientConn creation. If the newly +// created ClientConn is a nested one, i.e a valid parent ClientConn ID is +// specified via a dial option, the trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -345,7 +503,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) 
} } @@ -357,7 +515,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final if curr == len(interceptors)-1 { return finalInvoker } - return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) } } @@ -393,13 +551,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } } +// newConnectivityStateManager creates a connectivityStateManager with +// the specified id. +func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { + return &connectivityStateManager{ + channelzID: id, + pubSub: grpcsync.NewPubSub(ctx), + } +} + // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. +// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} channelzID *channelz.Identifier + pubSub *grpcsync.PubSub } // updateState updates the connectivity.State of ClientConn. @@ -415,6 +587,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.pubSub.Publish(state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. @@ -444,7 +618,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. - Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error // NewStream begins a streaming RPC. NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } @@ -474,7 +648,9 @@ type ClientConn struct { authority string // See determineAuthority(). dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + idlenessMgr idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -495,11 +671,44 @@ type ClientConn struct { sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + idlenessState ccIdlenessState // Tracks idleness state of the channel. + exitIdleCond *sync.Cond // Signalled when channel exits idle. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } +// ccIdlenessState tracks the idleness state of the channel. +// +// Channels start off in `active` and move to `idle` after a period of +// inactivity.
When moving back to `active` upon an incoming RPC, they +// transition through `exiting_idle`. This state is useful for synchronization +// with Close(). +// +// This state tracking is mostly for self-protection. The idlenessManager is +// expected to keep track of the state as well, and is expected not to call into +// the ClientConn unnecessarily. +type ccIdlenessState int8 + +const ( + ccIdlenessStateActive ccIdlenessState = iota + ccIdlenessStateIdle + ccIdlenessStateExitingIdle +) + +func (s ccIdlenessState) String() string { + switch s { + case ccIdlenessStateActive: + return "active" + case ccIdlenessStateIdle: + return "idle" + case ccIdlenessStateExitingIdle: + return "exitingIdle" + default: + return "unknown" + } +} + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in the former case and false in the latter. // @@ -539,7 +748,10 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.balancerWrapper.exitIdle() + cc.exitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. + cc.balancerWrapper.exitIdleMode() } func (cc *ClientConn) scWatcher() { @@ -588,6 +800,10 @@ func init() { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -696,6 +912,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi cc.balancerWrapper.updateSubConnState(sc, s, err) } +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out +} + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. @@ -703,11 +933,12 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: addrs, + addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called.
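copyAddressesWithoutBalancerAttributes exists because resolver.Address.Equal compares every field, including the LB-only BalancerAttributes. A small sketch of the effect (lbKey is an illustrative key type, not grpc-go API):

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

// lbKey is an illustrative comparable key type for balancer-only metadata.
type lbKey struct{}

func main() {
	a := resolver.Address{Addr: "10.0.0.1:443"}
	b := a
	b.BalancerAttributes = attributes.New(lbKey{}, "weight=2")

	// The two addresses differ only in LB-internal metadata, yet compare
	// unequal.
	fmt.Println(a.Equal(b)) // false

	// Stripping BalancerAttributes, as the helper above does, restores the
	// comparison the ClientConn actually wants.
	b.BalancerAttributes = nil
	fmt.Println(a.Equal(b)) // true
}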
@@ -788,16 +1019,19 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } - // Update connectivity state within the lock to prevent subsequent or - // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() ac.resetTransport() @@ -816,58 +1050,63 @@ func equalAddresses(a, b []resolver.Address) bool { return true } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// If ac is TransientFailure, it updates ac.addrs and returns true. The updated -// addresses will be picked up by retry in the next iteration after backoff. -// -// If ac is Shutdown or Idle, it updates ac.addrs and returns true. -// -// If the addresses is the same as the old list, it does nothing and returns -// true. -// -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// -// If ac is Ready, it checks whether current connected address of ac is in the -// new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. +func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - defer ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + + addrs = copyAddressesWithoutBalancerAttributes(addrs) + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { - ac.addrs = addrs - return true + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return } - if equalAddresses(ac.addrs, addrs) { - return true + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } } - if ac.state == connectivity.Connecting { - return false - } + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. - // ac.state is Ready, try to find the connected address. - var curAddrFound bool - for _, a := range addrs { - a.ServerName = ac.cc.getServerName(a) - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. 
+ if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil } - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) } - return curAddrFound + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() } // getServerName determines the serverName to be used in the connection @@ -928,7 +1167,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, @@ -958,23 +1197,13 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. + newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName - } + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB } cc.balancerWrapper.switchTo(newBalancerName) } @@ -1013,46 +1242,50 @@ func (cc *ClientConn) ResetConnectBackoff() { // Close tears down the ClientConn and all underlying connections. func (cc *ClientConn) Close() error { - defer cc.cancel() + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return ErrClientConnClosing } + + for cc.idlenessState == ccIdlenessStateExitingIdle { + cc.exitIdleCond.Wait() + } + conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) + pWrapper := cc.blockingpicker rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil bWrapper := cc.balancerWrapper + idlenessMgr := cc.idlenessMgr cc.mu.Unlock() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - cc.blockingpicker.close() + if pWrapper != nil { + pWrapper.close() + } if bWrapper != nil { bWrapper.close() } if rWrapper != nil { rWrapper.close() } + if idlenessMgr != nil { + idlenessMgr.Close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - ted := &channelz.TraceEventDesc{ - Desc: "Channel deleted", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. 
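The applyServiceConfigAndBalancer rewrite above drops the grpclb address-type special case: the channel now defaults to pick_first and otherwise uses whatever policy the service config names. A hedged sketch of steering that choice through the public API; the target and the use of insecure credentials are placeholders for illustration:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// With no service config at all, the channel lands on pick_first.
	// Naming a policy in loadBalancingConfig selects it instead;
	// round_robin ships with grpc-go.
	conn, err := grpc.Dial(
		"dns:///example.test:443", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"round_robin":{}}]}`),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}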
@@ -1082,7 +1315,8 @@ type addrConn struct {
 addrs []resolver.Address // All addresses that the resolver resolved to.
 // Use updateConnectivityState for updating addrConn's connectivity state.
- state connectivity.State
+ state connectivity.State
+ stateChan chan struct{} // closed and recreated on every state change.
 backoffIdx int // Needs to be stateful for resetConnectBackoff.
 resetBackoff chan struct{}
@@ -1096,8 +1330,15 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
 if ac.state == s {
 return
 }
+ // When changing states, reset the state change channel.
+ close(ac.stateChan)
+ ac.stateChan = make(chan struct{})
 ac.state = s
- channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
+ if lastErr == nil {
+ channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s)
+ } else {
+ channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
+ }
 ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr)
 }
@@ -1117,7 +1358,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
 func (ac *addrConn) resetTransport() {
 ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
+ acCtx := ac.ctx
+ if acCtx.Err() != nil {
 ac.mu.Unlock()
 return
 }
@@ -1145,15 +1387,16 @@ func (ac *addrConn) resetTransport() {
 ac.updateConnectivityState(connectivity.Connecting, nil)
 ac.mu.Unlock()
- if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil {
+ if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
 ac.cc.resolveNow(resolver.ResolveNowOptions{})
- // After exhausting all addresses, the addrConn enters
- // TRANSIENT_FAILURE.
 ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
+ if acCtx.Err() != nil {
+ // addrConn was torn down.
 ac.mu.Unlock()
 return
 }
+ // After exhausting all addresses, the addrConn enters
+ // TRANSIENT_FAILURE.
 ac.updateConnectivityState(connectivity.TransientFailure, err)
 // Backoff.
@@ -1168,13 +1411,13 @@ func (ac *addrConn) resetTransport() {
 ac.mu.Unlock()
 case <-b:
 timer.Stop()
- case <-ac.ctx.Done():
+ case <-acCtx.Done():
 timer.Stop()
 return
 }
 ac.mu.Lock()
- if ac.state != connectivity.Shutdown {
+ if acCtx.Err() == nil {
 ac.updateConnectivityState(connectivity.Idle, err)
 }
 ac.mu.Unlock()
@@ -1189,14 +1432,13 @@ func (ac *addrConn) resetTransport() {
 // tryAllAddrs tries to create a connection to the addresses, and stops at
 // the first successful one. It returns an error if no address was successfully
 // connected, or updates ac appropriately with the new transport.
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error {
+func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
 var firstConnErr error
 for _, addr := range addrs {
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
+ if ctx.Err() != nil {
 return errConnClosing
 }
+ ac.mu.Lock()
 ac.cc.mu.RLock()
 ac.dopts.copts.KeepaliveParams = ac.cc.mkp
@@ -1210,7 +1452,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
 channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr)
- err := ac.createTransport(addr, copts, connectDeadline)
+ err := ac.createTransport(ctx, addr, copts, connectDeadline)
 if err == nil {
 return nil
 }
@@ -1227,17 +1469,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
 // createTransport creates a connection to addr.
It returns an error if the // address was not successfully connected, or updates ac appropriately with the // new transport. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { addr.ServerName = ac.cc.getServerName(addr) - hctx, hcancel := context.WithCancel(ac.ctx) + hctx, hcancel := context.WithCancel(ctx) - onClose := grpcsync.OnceFunc(func() { + onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() - if ac.state == connectivity.Shutdown { - // Already shut down. tearDown() already cleared the transport and - // canceled hctx via ac.ctx, and we expected this connection to be - // closed, so do nothing here. + // adjust params based on GoAwayReason + ac.adjustParams(r) + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. return } hcancel() @@ -1254,20 +1499,17 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // Always go idle and wait for the LB policy to initiate a new // connection attempt. ac.updateConnectivityState(connectivity.Idle, nil) - }) - onGoAway := func(r transport.GoAwayReason) { - ac.mu.Lock() - ac.adjustParams(r) - ac.mu.Unlock() - onClose() } - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) @@ -1276,7 +1518,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() - if ac.state == connectivity.Shutdown { + if ctx.Err() != nil { // This can happen if the subConn was removed while in `Connecting` // state. tearDown() would have set the state to `Shutdown`, but // would not have closed the transport since ac.transport would not @@ -1288,6 +1530,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. go newTr.Close(transport.ErrConnClosing) return nil } @@ -1347,7 +1592,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // Set up the health check helper functions. 
currentTr := ac.transport - newStream := func(method string) (interface{}, error) { + newStream := func(method string) (any, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1371,7 +1616,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) } } }() @@ -1395,6 +1640,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct @@ -1412,16 +1680,7 @@ func (ac *addrConn) tearDown(err error) { ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. - ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() - } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, @@ -1435,6 +1694,29 @@ func (ac *addrConn) tearDown(err error) { // being deleted right away. channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancelation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. 
+ curTr.Close(err) + } + } } func (ac *addrConn) getState() connectivity.State { @@ -1522,6 +1804,9 @@ func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") +// getResolver finds the scheme in the cc's resolvers or the global registry. +// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). func (cc *ClientConn) getResolver(scheme string) resolver.Builder { for _, rb := range cc.dopts.resolvers { if scheme == rb.Scheme() { @@ -1543,7 +1828,14 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) parseTargetAndFindResolver() error { channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) var rb resolver.Builder @@ -1555,7 +1847,8 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } } @@ -1570,51 +1863,98 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { parsedTarget, err = parseTarget(canonicalTarget) if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) - return nil, err + return err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and endpoint. Query -// params are stripped from the endpoint. +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - // For targets of the form "[scheme]://[authority]/endpoint, the endpoint - // value returned from url.Parse() contains a leading "/". Although this is - // in accordance with RFC 3986, we do not want to break existing resolver - // implementations which expect the endpoint without the leading "/". So, we - // end up stripping the leading "/" here. But this will result in an - // incorrect parsing for something like "unix:///path/to/socket". Since we - // own the "unix" resolver, we can workaround in the unix resolver by using - // the `URL` field instead of the `Endpoint` field. 
- endpoint := u.Path
- if endpoint == "" {
- endpoint = u.Opaque
- }
- endpoint = strings.TrimPrefix(endpoint, "/")
- return resolver.Target{
- Scheme: u.Scheme,
- Authority: u.Host,
- Endpoint: endpoint,
- URL: *u,
- }, nil
+
+ return resolver.Target{URL: *u}, nil
+}
+
+func encodeAuthority(authority string) string {
+ const upperhex = "0123456789ABCDEF"
+
+ // shouldEscape reports whether a character must be escaped; the set of
+ // valid authority characters is listed here:
+ // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+ shouldEscape := func(c byte) bool {
+ // Alphanum are always allowed.
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ return false
+ }
+ switch c {
+ case '-', '_', '.', '~': // Unreserved characters
+ return false
+ case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+ return false
+ case ':', '[', ']', '@': // Authority related delimiters
+ return false
+ }
+ // Everything else must be escaped.
+ return true
+ }
+
+ hexCount := 0
+ for i := 0; i < len(authority); i++ {
+ c := authority[i]
+ if shouldEscape(c) {
+ hexCount++
+ }
+ }
+
+ if hexCount == 0 {
+ return authority
+ }
+
+ required := len(authority) + 2*hexCount
+ t := make([]byte, required)
+
+ j := 0
+ // This logic is a barebones version of escape in the go net/url library.
+ for i := 0; i < len(authority); i++ {
+ switch c := authority[i]; {
+ case shouldEscape(c):
+ t[j] = '%'
+ t[j+1] = upperhex[c>>4]
+ t[j+2] = upperhex[c&15]
+ j += 3
+ default:
+ t[j] = authority[i]
+ j++
+ }
+ }
+ return string(t)
 }
 // Determine channel authority. The order of precedence is as follows:
 // - user specified authority override using `WithAuthority` dial option
 // - creds' notion of server name for the authentication handshake
 // - endpoint from dial target of the form "scheme://[authority]/endpoint"
-func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) {
+//
+// Stores the determined authority in `cc.authority`.
+//
+// Returns a non-nil error if the authority returned by the transport
+// credentials does not match the authority configured through the dial option.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) determineAuthority() error {
+ dopts := cc.dopts
 // Historically, we had two options for users to specify the serverName or
 // authority for a channel. One was through the transport credentials
 // (either in its constructor, or through the OverrideServerName() method).
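For context on encodeAuthority above: an endpoint such as a/b/c is not a valid RFC 3986 authority, so it has to be percent-encoded before it can serve as the channel's default :authority. A rough standalone restatement of the same escaping rule, assuming that mirroring the vendored logic is enough for illustration (this is not the vendored function itself):

package main

import "fmt"

// encodeAuthority percent-encodes bytes that RFC 3986 does not allow in an
// authority component; a simplified restatement of the vendored logic.
func encodeAuthority(s string) string {
	const upperhex = "0123456789ABCDEF"
	isAllowed := func(c byte) bool {
		switch {
		case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9':
			return true
		}
		switch c {
		case '-', '_', '.', '~', // unreserved
			'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', // sub-delims
			':', '[', ']', '@': // authority delimiters
			return true
		}
		return false
	}
	out := make([]byte, 0, len(s))
	for i := 0; i < len(s); i++ {
		if c := s[i]; isAllowed(c) {
			out = append(out, c)
		} else {
			out = append(out, '%', upperhex[c>>4], upperhex[c&15])
		}
	}
	return string(out)
}

func main() {
	// A multi-segment endpoint only becomes a valid authority once escaped.
	fmt.Println(encodeAuthority("a/b/c")) // a%2Fb%2Fc
}

Per the precedence documented on determineAuthority, this encoded endpoint is only the fallback; an explicit grpc.WithAuthority(...) or the transport credentials' notion of the server name still wins.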
@@ -1631,25 +1971,62 @@ func determineAuthority(endpoint, target string, dopts dialOptions) (string, err } authorityFromDialOption := dopts.authority if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { - return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) } + endpoint := cc.parsedTarget.Endpoint() + target := cc.target switch { case authorityFromDialOption != "": - return authorityFromDialOption, nil + cc.authority = authorityFromDialOption case authorityFromCreds != "": - return authorityFromCreds, nil + cc.authority = authorityFromCreds case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): // TODO: remove when the unix resolver implements optional interface to // return channel authority. - return "localhost", nil + cc.authority = "localhost" case strings.HasPrefix(endpoint, ":"): - return "localhost" + endpoint, nil + cc.authority = "localhost" + endpoint default: // TODO: Define an optional interface on the resolver builder to return // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority. - return endpoint, nil + // Escape the endpoint to handle use cases where the endpoint + // might not be a valid authority by default. + // For example an endpoint which has multiple paths like + // 'a/b/c', which is not a valid authority by default. + cc.authority = encodeAuthority(endpoint) } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +} + +// initResolverWrapper creates a ccResolverWrapper, which builds the name +// resolver. This method grabs the lock to assign the newly built resolver +// wrapper to the cc.resolverWrapper field. +func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { + rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: creds, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) + if err != nil { + return fmt.Errorf("failed to build resolver: %v", err) + } + // Resolver implementations may report state update or error inline when + // built (or right after), and this is handled in cc.updateResolverState. + // Also, an error from the resolver might lead to a re-resolution request + // from the balancer, which is handled in resolveNow() where + // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. + cc.mu.Lock() + cc.resolverWrapper = rw + cc.mu.Unlock() + return nil } diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 12977654..411e3dfd 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -27,8 +27,8 @@ import ( // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. 
type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error } var _ baseCodec = Codec(nil) @@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go index 0b206a57..934fac2b 100644 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -18,7 +18,15 @@ package codes -import "strconv" +import ( + "strconv" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.CanonicalString = canonicalString +} func (c Code) String() string { switch c { @@ -60,3 +68,44 @@ func (c Code) String() string { return "Code(" + strconv.FormatInt(int64(c), 10) + ")" } } + +func canonicalString(c Code) string { + switch c { + case OK: + return "OK" + case Canceled: + return "CANCELLED" + case Unknown: + return "UNKNOWN" + case InvalidArgument: + return "INVALID_ARGUMENT" + case DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case NotFound: + return "NOT_FOUND" + case AlreadyExists: + return "ALREADY_EXISTS" + case PermissionDenied: + return "PERMISSION_DENIED" + case ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case FailedPrecondition: + return "FAILED_PRECONDITION" + case Aborted: + return "ABORTED" + case OutOfRange: + return "OUT_OF_RANGE" + case Unimplemented: + return "UNIMPLEMENTED" + case Internal: + return "INTERNAL" + case Unavailable: + return "UNAVAILABLE" + case DataLoss: + return "DATA_LOSS" + case Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index ce2bbc10..877b7cd2 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -23,9 +23,9 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "net/url" + "os" credinternal "google.golang.org/grpc/internal/credentials" ) @@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // it will override the virtual host name of authority (e.g. :authority header // field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) + b, err := os.ReadFile(certFile) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 9372dc32..1fd0d5c1 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -38,12 +38,14 @@ import ( func init() { internal.AddGlobalDialOptions = func(opt ...DialOption) { - extraDialOptions = append(extraDialOptions, opt...) + globalDialOptions = append(globalDialOptions, opt...) 
} internal.ClearGlobalDialOptions = func() { - extraDialOptions = nil + globalDialOptions = nil } internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions } // dialOptions configure a Dial call. dialOptions are set by the DialOption @@ -75,6 +77,8 @@ type dialOptions struct { defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string resolvers []resolver.Builder + idleTimeout time.Duration + recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. @@ -82,7 +86,7 @@ type DialOption interface { apply(*dialOptions) } -var extraDialOptions []DialOption +var globalDialOptions []DialOption // EmptyDialOption does not alter the dial configuration. It can be embedded in // another structure to build custom dial options. @@ -95,6 +99,16 @@ type EmptyDialOption struct{} func (EmptyDialOption) apply(*dialOptions) {} +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). +func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + // funcDialOption wraps a function that modifies dialOptions into an // implementation of the DialOption interface. type funcDialOption struct { @@ -111,13 +125,42 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { } } +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -127,8 +170,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. 
Zero or negative values will
+// disable the read buffer for a connection so the data framer can access the
+// underlying conn directly.
 func WithReadBufferSize(s int) DialOption {
 return newFuncDialOption(func(o *dialOptions) {
 o.copts.ReadBufferSize = s
@@ -267,6 +311,9 @@ func withBackoff(bs internalbackoff.Strategy) DialOption {
 // WithBlock returns a DialOption which makes callers of Dial block until the
 // underlying connection is up. Without this, Dial returns immediately and
 // connecting to the server happens in the background.
+//
+// Use of this feature is not recommended. For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
 func WithBlock() DialOption {
 return newFuncDialOption(func(o *dialOptions) {
 o.block = true
@@ -278,6 +325,9 @@ func WithBlock() DialOption {
 // the context.DeadlineExceeded error.
 // Implies WithBlock()
 //
+// Use of this feature is not recommended. For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
 // # Experimental
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
@@ -420,6 +470,9 @@ func withBinaryLogger(bl binarylog.Logger) DialOption {
 // FailOnNonTempDialError only affects the initial dial, and does not do
 // anything useful unless you are also using WithBlock().
 //
+// Use of this feature is not recommended. For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
 // # Experimental
 //
 // Notice: This API is EXPERIMENTAL and may be changed or removed in a
@@ -590,6 +643,7 @@ func defaultDialOptions() dialOptions {
 ReadBufferSize: defaultReadBufSize,
 UseProxy: true,
 },
+ recvBufferPool: nopBufferPool{},
 }
 }
@@ -618,3 +672,44 @@ func WithResolvers(rs ...resolver.Builder) DialOption {
 o.resolvers = append(o.resolvers, rs...)
 })
 }
+
+// WithIdleTimeout returns a DialOption that configures an idle timeout for the
+// channel. If the channel is idle for the configured timeout, i.e. there are no
+// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
+// and as a result the name resolver and load balancer will be shut down. The
+// channel will exit idle mode when the Connect() method is called or when an
+// RPC is initiated.
+//
+// By default this feature is disabled, which can also be explicitly configured
+// by passing zero to this function.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithIdleTimeout(d time.Duration) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.idleTimeout = d
+ })
+}
+
+// WithRecvBufferPool returns a DialOption that configures the ClientConn
+// to use the provided shared buffer pool for parsing incoming messages. Depending
+// on the application's workload, this could result in reduced memory allocation.
+//
+// If you are unsure about how to implement a memory pool but want to utilize one,
+// begin with grpc.NewSharedBufferPool.
+//
+// Note: The shared buffer pool feature will not be active if any of the following
+// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
+// cases, the shared buffer pool will be ignored.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 711763d5..69d5580b 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -75,7 +75,9 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c - grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } } // GetCompressor returns Compressor for the given compressor name. @@ -88,9 +90,9 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 3009b35a..0ee3d3ba 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -37,7 +37,7 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. type codec struct{} -func (codec) Marshal(v interface{}) ([]byte, error) { +func (codec) Marshal(v any) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { +func (codec) Unmarshal(data []byte, v any) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index 8358dd6e..ac73c9ce 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -31,71 +31,71 @@ type componentData struct { var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) 
} -func (c *componentData) ErrorDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.ErrorDepth(depth+1, args...) } -func (c *componentData) FatalDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...interface{}) { +func (c *componentData) Info(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warning(args ...interface{}) { +func (c *componentData) Warning(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...interface{}) { +func (c *componentData) Error(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatal(args ...interface{}) { +func (c *componentData) Fatal(args ...any) { c.FatalDepth(1, args...) } -func (c *componentData) Infof(format string, args ...interface{}) { +func (c *componentData) Infof(format string, args ...any) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...interface{}) { +func (c *componentData) Warningf(format string, args ...any) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...interface{}) { +func (c *componentData) Errorf(format string, args ...any) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...interface{}) { +func (c *componentData) Fatalf(format string, args ...any) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...interface{}) { +func (c *componentData) Infoln(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warningln(args ...interface{}) { +func (c *componentData) Warningln(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...interface{}) { +func (c *componentData) Errorln(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatalln(args ...interface{}) { +func (c *componentData) Fatalln(args ...any) { c.FatalDepth(1, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index c8bb2be3..16928c9c 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -42,53 +42,53 @@ func V(l int) bool { } // Info logs to the INFO log. -func Info(args ...interface{}) { +func Info(args ...any) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { +func Infoln(args ...any) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { +func Warning(args ...any) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { +func Warningf(format string, args ...any) { grpclog.Logger.Warningf(format, args...) 
}
 // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
-func Warningln(args ...interface{}) {
+func Warningln(args ...any) {
 grpclog.Logger.Warningln(args...)
 }
 // Error logs to the ERROR log.
-func Error(args ...interface{}) {
+func Error(args ...any) {
 grpclog.Logger.Error(args...)
 }
 // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
-func Errorf(format string, args ...interface{}) {
+func Errorf(format string, args ...any) {
 grpclog.Logger.Errorf(format, args...)
 }
 // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
-func Errorln(args ...interface{}) {
+func Errorln(args ...any) {
 grpclog.Logger.Errorln(args...)
 }
 // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
 // It calls os.Exit() with exit code 1.
-func Fatal(args ...interface{}) {
+func Fatal(args ...any) {
 grpclog.Logger.Fatal(args...)
 // Make sure fatal logs will exit.
 os.Exit(1)
@@ -96,7 +96,7 @@ func Fatal(args ...interface{}) {
 // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
 // It calls os.Exit() with exit code 1.
-func Fatalf(format string, args ...interface{}) {
+func Fatalf(format string, args ...any) {
 grpclog.Logger.Fatalf(format, args...)
 // Make sure fatal logs will exit.
 os.Exit(1)
@@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) {
 // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
 // It calls os.Exit() with exit code 1.
-func Fatalln(args ...interface{}) {
+func Fatalln(args ...any) {
 grpclog.Logger.Fatalln(args...)
 // Make sure fatal logs will exit.
 os.Exit(1)
@@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) {
 // Print prints to the logger. Arguments are handled in the manner of fmt.Print.
 //
 // Deprecated: use Info.
-func Print(args ...interface{}) {
+func Print(args ...any) {
 grpclog.Logger.Info(args...)
 }
 // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
 //
 // Deprecated: use Infof.
-func Printf(format string, args ...interface{}) {
+func Printf(format string, args ...any) {
 grpclog.Logger.Infof(format, args...)
 }
 // Println prints to the logger. Arguments are handled in the manner of fmt.Println.
 //
 // Deprecated: use Infoln.
-func Println(args ...interface{}) {
+func Println(args ...any) {
 grpclog.Logger.Infoln(args...)
 }
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
index ef06a482..b1674d82 100644
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog"
 //
 // Deprecated: use LoggerV2.
 type Logger interface {
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Fatalln(args ...interface{})
- Print(args ...interface{})
- Printf(format string, args ...interface{})
- Println(args ...interface{})
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Fatalln(args ...any)
+ Print(args ...any)
+ Printf(format string, args ...any)
+ Println(args ...any)
 }
 // SetLogger sets the logger that is used in grpc. Call only from
@@ -45,39 +45,39 @@ type loggerWrapper struct {
 Logger
 }
-func (g *loggerWrapper) Info(args ...interface{}) {
+func (g *loggerWrapper) Info(args ...any) {
 g.Logger.Print(args...)
 }
-func (g *loggerWrapper) Infoln(args ...interface{}) {
+func (g *loggerWrapper) Infoln(args ...any) {
 g.Logger.Println(args...)
} -func (g *loggerWrapper) Infof(format string, args ...interface{}) { +func (g *loggerWrapper) Infof(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Warning(args ...interface{}) { +func (g *loggerWrapper) Warning(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Warningln(args ...interface{}) { +func (g *loggerWrapper) Warningln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +func (g *loggerWrapper) Warningf(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...interface{}) { +func (g *loggerWrapper) Error(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Errorln(args ...interface{}) { +func (g *loggerWrapper) Errorln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +func (g *loggerWrapper) Errorf(format string, args ...any) { g.Logger.Printf(format, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index b5560b47..ecfd36d7 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "os" "strconv" @@ -34,35 +33,35 @@ import ( // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. 
- Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { @@ -183,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...interface{}) { +func (g *loggerT) Info(args ...any) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...interface{}) { +func (g *loggerT) Infoln(args ...any) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...interface{}) { +func (g *loggerT) Infof(format string, args ...any) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...interface{}) { +func (g *loggerT) Warning(args ...any) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...interface{}) { +func (g *loggerT) Warningln(args ...any) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...interface{}) { +func (g *loggerT) Warningf(format string, args ...any) { g.output(warningLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Error(args ...interface{}) { +func (g *loggerT) Error(args ...any) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...interface{}) { +func (g *loggerT) Errorln(args ...any) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...interface{}) { +func (g *loggerT) Errorf(format string, args ...any) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...interface{}) { +func (g *loggerT) Fatal(args ...any) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...interface{}) { +func (g *loggerT) Fatalln(args ...any) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...interface{}) { +func (g *loggerT) Fatalf(format string, args ...any) { g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -249,11 +248,11 @@ func (g *loggerT) V(l int) bool { type DepthLoggerV2 interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index bb96ef57..877d78fc 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -23,7 +23,7 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. // Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } @@ -78,13 +78,13 @@ type UnaryServerInfo struct { // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. @@ -101,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. 
-type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 08666f62..3c594e6e 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { } } -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { gsb.currentMu.Lock() defer gsb.currentMu.Unlock() gsb.mu.Lock() @@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { balToUpdate = gsb.balancerPending } - gsb.mu.Unlock() if balToUpdate == nil { // SubConn belonged to a stale lb policy that has not yet fully closed, // or the balancer was already closed. + gsb.mu.Unlock() return } - balToUpdate.UpdateSubConnState(sc, state) + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) } // Close closes any active child balancers. @@ -242,7 +255,7 @@ func (gsb *Balancer) Close() { // // It implements the balancer.ClientConn interface and is passed down in that // capacity to the wrapped balancer. It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/remove +// the wrapped balancer and calls from the latter to create/update/shutdown // SubConns update this set before being forwarded to the parent ClientConn. // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. @@ -254,21 +267,10 @@ type balancerWrapper struct { subconns map[balancer.SubConn]bool // subconns created by this balancer } -func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - if state.ConnectivityState == connectivity.Shutdown { - bw.gsb.mu.Lock() - delete(bw.subconns, sc) - bw.gsb.mu.Unlock() - } - // There is no need to protect this read with a mutex, as the write to the - // Balancer field happens in SwitchTo, which completes before this can be - // called. - bw.Balancer.UpdateSubConnState(sc, state) -} - -// Close closes the underlying LB policy and removes the subconns it created. bw -// must not be referenced via balancerCurrent or balancerPending in gsb when -// called. gsb.mu must not be held. Does not panic with a nil receiver. +// Close closes the underlying LB policy and shuts down the subconns it +// created. bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. 
Does not panic with a nil +// receiver. func (bw *balancerWrapper) Close() { // before Close is called. if bw == nil { @@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() { bw.Balancer.Close() bw.gsb.mu.Lock() for sc := range bw.subconns { - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() } bw.gsb.mu.Unlock() } @@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne } bw.gsb.mu.Unlock() + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } sc, err := bw.gsb.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } bw.gsb.mu.Lock() if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() bw.gsb.mu.Unlock() return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) } @@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { } func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { - bw.gsb.mu.Unlock() - return - } - bw.gsb.mu.Unlock() - bw.gsb.cc.RemoveSubConn(sc) + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. + sc.Shutdown() } func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go index 3a905d96..94a08d68 100644 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -25,7 +25,7 @@ import ( // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} + Parse(md metadata.MD) any } var parser Parser @@ -38,7 +38,7 @@ func SetParser(lr Parser) { } // Parse calls parser.Read(). -func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 809d73cc..755fdebc 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -28,8 +28,13 @@ import ( "google.golang.org/grpc/internal/grpcutil" ) -// Logger is the global binary logger. It can be used to get binary logger for -// each method. +var grpclogLogger = grpclog.Component("binarylog") + +// Logger specifies MethodLoggers for method names with a Log call that +// takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type Logger interface { GetMethodLogger(methodName string) MethodLogger } @@ -40,8 +45,6 @@ type Logger interface { // It is used to get a MethodLogger for each individual method. var binLogger Logger -var grpclogLogger = grpclog.Component("binarylog") - // SetLogger sets the binary logger. // // Only call this at init time. 
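For readers following the gracefulswitch.go hunks above: the widespread interface{} to any rewrites throughout this patch are purely cosmetic, since any has been an alias for interface{} since Go 1.18. The behavioral change in gracefulswitch is different: the exported UpdateSubConnState forwarding method on balancerWrapper is replaced by a per-SubConn StateListener closure that NewSubConn installs before cc.NewSubConn returns. The closure captures the sc variable itself rather than its (still nil) value, so by the time the transport fires the callback, sc refers to the fully constructed SubConn. Below is a minimal, self-contained sketch of that capture-before-assignment pattern; all type and function names are invented for illustration and are not gRPC API.

package main

import "fmt"

// subConn is a toy stand-in for balancer.SubConn.
type subConn struct {
	id       int
	listener func(state string)
}

// connect simulates the transport reporting a state change some time after
// the SubConn has been created and returned to the caller.
func (s *subConn) connect() {
	if s.listener != nil {
		s.listener("READY")
	}
}

// options mirrors the shape of a per-connection state listener option.
type options struct {
	stateListener func(state string)
}

func newSubConn(id int, opts options) *subConn {
	return &subConn{id: id, listener: opts.stateListener}
}

func main() {
	// The pattern from the hunk above: declare sc first, then install a
	// listener that closes over the sc variable before newSubConn assigns
	// it. Go closures capture variables, not values, so the callback sees
	// the finished SubConn even though sc was nil when the closure was made.
	var sc *subConn
	opts := options{}
	oldListener := opts.stateListener // preserve any previously set listener
	opts.stateListener = func(state string) {
		if oldListener != nil {
			oldListener(state)
			return
		}
		fmt.Printf("subconn %d is now %s\n", sc.id, state)
	}
	sc = newSubConn(1, opts)
	sc.connect() // prints: subconn 1 is now READY
}

The same hunk also retires the RemoveSubConn path in favor of calling Shutdown on the SubConn directly, which is why the deprecated balancerWrapper.RemoveSubConn above is reduced to sc.Shutdown() with a note that third-party balancers may still call it.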
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 179f4a26..0f31274a 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -19,6 +19,7 @@ package binarylog import ( + "context" "net" "strings" "sync/atomic" @@ -26,7 +27,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -48,8 +49,11 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type MethodLogger interface { - Log(LogEntryConfig) + Log(context.Context, LogEntryConfig) } // TruncatingMethodLogger is a method logger that truncates headers and messages @@ -64,6 +68,9 @@ type TruncatingMethodLogger struct { } // NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { return &TruncatingMethodLogger{ headerMaxLen: h, @@ -79,7 +86,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { // Build is an internal only method for building the proto message out of the // input event. It's made public to enable other library to reuse as much logic // in TruncatingMethodLogger as possible. -func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -87,22 +94,22 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: + case *binlogpb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: + case *binlogpb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: + case *binlogpb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } return m } // Log creates a proto binary log entry, and logs it to the sink. -func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { +func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } -func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -121,7 +128,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated // but not counted towards the size limit. 
continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } @@ -132,7 +139,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated return truncated } -func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } @@ -144,8 +151,11 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry + toProto() *binlogpb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. @@ -159,10 +169,10 @@ type ClientHeader struct { PeerAddr net.Addr } -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. - clientHeader := &pb.ClientHeader{ + clientHeader := &binlogpb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, @@ -170,16 +180,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry { if c.Timeout > 0 { clientHeader.Timeout = ptypes.DurationProto(c.Timeout) } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -195,19 +205,19 @@ type ServerHeader struct { PeerAddr net.Addr } -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -220,10 +230,10 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. 
- Message interface{} + Message any } -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -238,19 +248,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -260,10 +270,10 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -278,19 +288,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -300,15 +310,15 @@ type ClientHalfClose struct { OnClientSide bool } -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. 
} if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -324,7 +334,7 @@ type ServerTrailer struct { PeerAddr net.Addr } -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { grpclogLogger.Info("binarylogging: error in trailer is not a status error") @@ -340,10 +350,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), @@ -352,9 +362,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -367,15 +377,15 @@ type Cancel struct { OnClientSide bool } -func (c *Cancel) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -392,15 +402,15 @@ func metadataKeyOmit(key string) bool { return strings.HasPrefix(key, "grpc-") } -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ + &binlogpb.MetadataEntry{ Key: k, Value: []byte(v), }, @@ -410,26 +420,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata { return ret } -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 + ret.Type = binlogpb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 + ret.Type = binlogpb.Address_TYPE_IPV6 } else { - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN // Do not set address and port fields. 
break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX + ret.Type = binlogpb.Address_TYPE_UNIX ret.Address = a.String() default: - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN } return ret } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index c2fdd58b..264de387 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ) var ( @@ -42,15 +42,15 @@ type Sink interface { // Write will be called to write the log entry into the sink. // // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. // @@ -66,7 +66,7 @@ type writerSink struct { out io.Writer } -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) @@ -96,7 +96,7 @@ type bufferedSink struct { done chan struct{} } -func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { fs.mu.Lock() defer fs.mu.Unlock() if !fs.flusherStarted { diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 9f6a0c12..4399c3df 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -28,35 +28,38 @@ import "sync" // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `interface{}`. This means that a call to Put() incurs an extra memory -// allocation, and also that users need a type assertion while reading. For -// performance critical code paths, using Unbounded is strongly discouraged and -// defining a new type specific implementation of this buffer is preferred. See +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan interface{} + c chan any + closed bool mu sync.Mutex - backlog []interface{} + backlog []any } // NewUnbounded returns a new instance of Unbounded. func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan interface{}, 1)} + return &Unbounded{c: make(chan any, 1)} } // Put adds t to the unbounded buffer. 
-func (b *Unbounded) Put(t interface{}) { +func (b *Unbounded) Put(t any) { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) == 0 { select { case b.c <- t: - b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) - b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel @@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) { // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -72,7 +79,6 @@ func (b *Unbounded) Load() { default: } } - b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -80,6 +86,20 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. -func (b *Unbounded) Get() <-chan interface{} { +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed. +func (b *Unbounded) Get() <-chan any { return b.c } + +// Close closes the unbounded buffer. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } + b.closed = true + close(b.c) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 777cbcd7..5395e775 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,9 +24,7 @@ package channelz import ( - "context" "errors" - "fmt" "sort" "sync" "sync/atomic" @@ -40,8 +38,11 @@ const ( ) var ( - db dbWrapper - idGen idGenerator + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db dbWrapper // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -52,14 +53,14 @@ var ( func TurnOn() { if !IsOn() { db.set(newChannelMap()) - idGen.reset() + IDGen.Reset() atomic.StoreInt32(&curState, 1) } } // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) + return atomic.LoadInt32(&curState) == 1 } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -97,43 +98,6 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorageForTesting initializes channelz data storage and id -// generator for testing purposes. -// -// Returns a cleanup function to be invoked by the test, which waits for up to -// 10s for all channelz state to be reset by the grpc goroutines when those -// entities get closed. This cleanup function helps with ensuring that tests -// don't mess up each other. 
-func NewChannelzStorageForTesting() (cleanup func() error) { - db.set(newChannelMap()) - idGen.reset() - - return func() error { - cm := db.get() - if cm == nil { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - for { - cm.mu.RLock() - topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) - cm.mu.RUnlock() - - if err := ctx.Err(); err != nil { - return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) - } - if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { - return nil - } - <-ticker.C - } - } -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // @@ -193,7 +157,7 @@ func GetServer(id int64) *ServerMetric { // // If channelz is not turned ON, the channelz database is not mutated. func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() var parent int64 isTopChannel := true if pid != nil { @@ -229,7 +193,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er if pid == nil { return nil, errors.New("a SubChannel's parent id cannot be nil") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefSubChannel, id, pid), nil } @@ -251,7 +215,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er // // If channelz is not turned ON, the channelz database is not mutated. func RegisterServer(s Server, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefServer, id, nil) } @@ -277,7 +241,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a ListenSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefListenSocket, id, pid), nil } @@ -297,7 +261,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a NormalSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefNormalSocket, id, pid), nil } @@ -776,14 +740,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { return sm } -type idGenerator struct { +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { id int64 } -func (i *idGenerator) reset() { +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. 
+func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index 8e13a3d2..f89e6f77 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -31,7 +31,7 @@ func withParens(id *Identifier) string { } // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtInfo, @@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, @@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, @@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, @@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, @@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Errorf logs and adds a trace event if channelz is on. 
-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtError, diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 7b2f350e..1d4020f5 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -628,6 +628,7 @@ type tracedChannel interface { type channelTrace struct { cm *channelMap + clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { } func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index 8d194e44..98288c3f 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -23,7 +23,7 @@ import ( ) // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { +func GetSocketOption(socket any) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 837ddc40..b5568b22 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -22,6 +22,6 @@ package channelz // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { +func GetSocketOption(c any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 32c9b590..9deee7f6 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -25,12 +25,12 @@ import ( type requestInfoKey struct{} // NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } // RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) interface{} { +func RequestInfoFromContext(ctx context.Context) any { return ctx.Value(requestInfoKey{}) } @@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} { type clientHandshakeInfoKey struct{} // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. -func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { +func ClientHandshakeInfoFromContext(ctx context.Context) any { return ctx.Value(clientHandshakeInfoKey{}) } // NewClientHandshakeInfoContext creates a context with chi. 
-func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 7edd196b..3cf10ddf 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -21,19 +21,52 @@ package envconfig import ( "os" + "strconv" "strings" ) -const ( - prefix = "GRPC_GO_" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" - advertiseCompressorsStr = prefix + "ADVERTISE_COMPRESSORS" -) - var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). - TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) // AdvertiseCompressors is set if registered compressor should be advertised // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). - AdvertiseCompressors = !strings.EqualFold(os.Getenv(advertiseCompressorsStr), "false") + AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the + // pick_first LB policy. + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". + return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go index 821dd0a7..dd314cfb 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/observability.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -28,9 +28,15 @@ const ( var ( // ObservabilityConfig is the json configuration for the gcp/observability // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
ObservabilityConfig = os.Getenv(envObservabilityConfig) // ObservabilityConfigFile is the json configuration for the // gcp/observability specified in a file with the location specified in // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) ) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index af09711a..02b4b6a1 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -20,7 +20,6 @@ package envconfig import ( "os" - "strings" ) const ( @@ -36,16 +35,6 @@ const ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - - ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" - clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" - aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" - outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" - federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" - rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" - - c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" ) var ( @@ -64,38 +53,43 @@ var ( // XDSRingHash indicates whether ring hash support is enabled, which can be // disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) // XDSClientSideSecurity is used to control processing of security // configuration on the client-side. // // Note that there is no env var protection for the server-side because we // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. - XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") - // XDSAggregateAndDNS indicates whether processing of aggregated cluster - // and DNS cluster is enabled, which can be enabled by setting the - // environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to - // "true". - XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") + XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) + // XDSAggregateAndDNS indicates whether processing of aggregated cluster and + // DNS cluster is enabled, which can be disabled by setting the environment + // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" + // to "false". + XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". 
- XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) // XDSOutlierDetection indicates whether outlier detection support is // enabled, which can be disabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") - // XDSFederation indicates whether federation support is enabled. - XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) + // XDSFederation indicates whether federation support is enabled, which can + // be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". + XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be enabled by + // support for the RLS CLuster Specifier is enabled, which can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "true". - XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + // "false". + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. - C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which + // can be disabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". + XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index b68e26a3..bfc45102 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -30,7 +30,7 @@ var Logger LoggerV2 var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...interface{}) { +func InfoDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { } // WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...interface{}) { +func WarningDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...interface{}) { +func ErrorDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...interface{}) { +func FatalDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { @@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. 
Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -116,11 +116,11 @@ type LoggerV2 interface { // later release. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 82af70e9..faa998de 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -31,7 +31,7 @@ type PrefixLogger struct { } // Infof does info logging. 
-func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +func (pl *PrefixLogger) Infof(format string, args ...any) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format @@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { } // Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +func (pl *PrefixLogger) Warningf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { } // Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +func (pl *PrefixLogger) Errorf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,7 +62,10 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { } // Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +func (pl *PrefixLogger) Debugf(format string, args ...any) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. if !Logger.V(2) { return } @@ -73,6 +76,15 @@ func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { return } InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) } // NewPrefixLogger creates a prefix logger with the given prefix. diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go index 517ea706..aa97273e 100644 --- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -72,3 +72,24 @@ func Uint64() uint64 { defer mu.Unlock() return r.Uint64() } + +// Uint32 implements rand.Uint32 on the grpcrand global source. +func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} + +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. +var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go new file mode 100644 index 00000000..900917db --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -0,0 +1,125 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/buffer" +) + +// CallbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type CallbackSerializer struct { + // done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + done chan struct{} + + callbacks *buffer.Unbounded + closedMu sync.Mutex + closed bool +} + +// NewCallbackSerializer returns a new CallbackSerializer instance. The provided +// context will be passed to the scheduled callbacks. Users should cancel the +// provided context to shutdown the CallbackSerializer. It is guaranteed that no +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. +func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + cs := &CallbackSerializer{ + done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go cs.run(ctx) + return cs +} + +// Schedule adds a callback to be scheduled after existing callbacks are run. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + cs.closedMu.Lock() + defer cs.closedMu.Unlock() + + if cs.closed { + return false + } + cs.callbacks.Put(f) + return true +} + +func (cs *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + + defer close(cs.done) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case callback, ok := <-cs.callbacks.Get(): + if !ok { + return + } + cs.callbacks.Load() + callback.(func(ctx context.Context))(ctx) + } + } + + // Fetch pending callbacks if any, and execute them before returning from + // this method and closing cs.done. + cs.closedMu.Lock() + cs.closed = true + backlog = cs.fetchPendingCallbacks() + cs.callbacks.Close() + cs.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } +} + +func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { + case b := <-cs.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) + cs.callbacks.Load() + default: + return backlog + } + } +} + +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. 
+func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 00000000..aef8cec1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. +func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. 
+func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go new file mode 100644 index 00000000..6c272476 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -0,0 +1,301 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() error +} + +// Manager defines the functionality required to track RPC activity on a +// channel. +type Manager interface { + OnCallBegin() error + OnCallEnd() + Close() +} + +type noopManager struct{} + +func (noopManager) OnCallBegin() error { return nil } +func (noopManager) OnCallEnd() {} +func (noopManager) Close() {} + +// manager implements the Manager interface. It uses atomic operations to +// synchronize access to shared state and a mutex to guarantee mutual exclusion +// in a critical section. +type manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + logger grpclog.LoggerV2 + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. 
+	//   - Competing intentions:
+	//     - The channel is in idle mode and there are multiple RPCs starting at
+	//       the same time, all trying to move the channel out of idle. Only one
+	//       of them should succeed in doing so, while the other RPCs should
+	//       piggyback on the first one and be successfully handled.
+	idleMu       sync.RWMutex
+	actuallyIdle bool
+	timer        *time.Timer
+}
+
+// ManagerOptions is a collection of options used by
+// NewManager.
+type ManagerOptions struct {
+	Enforcer Enforcer
+	Timeout  time.Duration
+	Logger   grpclog.LoggerV2
+}
+
+// NewManager creates a new idleness manager implementation for the
+// given idle timeout.
+func NewManager(opts ManagerOptions) Manager {
+	if opts.Timeout == 0 {
+		return noopManager{}
+	}
+
+	m := &manager{
+		enforcer: opts.Enforcer,
+		timeout:  int64(opts.Timeout),
+		logger:   opts.Logger,
+	}
+	m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout)
+	return m
+}
+
+// resetIdleTimer resets the idle timer to the given duration. This method
+// should only be called from the timer callback.
+func (m *manager) resetIdleTimer(d time.Duration) {
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+
+	if m.timer == nil {
+		// Only close sets timer to nil. We are done.
+		return
+	}
+
+	// It is safe to ignore the return value from Reset() because this method is
+	// only ever called from the timer callback, which means the timer has
+	// already fired.
+	m.timer.Reset(d)
+}
+
+// handleIdleTimeout is the timer callback that is invoked upon expiry of the
+// configured idle timeout. The channel is considered inactive if there are no
+// ongoing calls and no RPC activity since the last time the timer fired.
+func (m *manager) handleIdleTimeout() {
+	if m.isClosed() {
+		return
+	}
+
+	if atomic.LoadInt32(&m.activeCallsCount) > 0 {
+		m.resetIdleTimer(time.Duration(m.timeout))
+		return
+	}
+
+	// There has been activity on the channel since we last got here. Reset the
+	// timer and return.
+	if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+		// Set the timer to fire after a duration of idle timeout, calculated
+		// from the time the most recent RPC completed.
+		atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
+		m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano()))
+		return
+	}
+
+	// This CAS operation is extremely likely to succeed given that there has
+	// been no activity since the last time we were here. Setting the
+	// activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the
+	// channel is either in idle mode or is trying to get there.
+	if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
+		// This CAS operation can fail if an RPC started after we checked for
+		// activity at the top of this method, or one was ongoing from before
+		// the last time we were here. In both cases, reset the timer and return.
+		m.resetIdleTimer(time.Duration(m.timeout))
+		return
+	}
+
+	// Now that we've set the active calls count to -math.MaxInt32, it's time to
+	// actually move to idle mode.
+	if m.tryEnterIdleMode() {
+		// Successfully entered idle mode. No timer needed until we exit idle.
+		return
+	}
+
+	// Failed to enter idle mode due to a concurrent RPC that kept the channel
+	// active, or because of an error from the channel. Undo the attempt to
+	// enter idle, and reset the timer to try again later.
+	atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+	m.resetIdleTimer(time.Duration(m.timeout))
+}
+
+// tryEnterIdleMode instructs the channel to enter idle mode. But before
+// that, it performs a last minute check to ensure that no new RPC has come in,
+// making the channel active.
+//
+// Return value indicates whether or not the channel moved to idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with exitIdleMode.
+func (m *manager) tryEnterIdleMode() bool {
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+
+	if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
+		// We raced and lost to a new RPC. Very rare, but stop entering idle.
+		return false
+	}
+	if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+		// A very short RPC could have come in (and also finished) after we
+		// checked for calls count and activity in handleIdleTimeout(), but
+		// before the CAS operation. So, we need to check for activity again.
+		return false
+	}
+
+	// No new RPCs have come in since we last set the active calls count value
+	// to -math.MaxInt32 in the timer callback. And since we have the lock, it
+	// is safe to enter idle mode now.
+	if err := m.enforcer.EnterIdleMode(); err != nil {
+		m.logger.Errorf("Failed to enter idle mode: %v", err)
+		return false
+	}
+
+	// Successfully entered idle mode.
+	m.actuallyIdle = true
+	return true
+}
+
+// OnCallBegin is invoked at the start of every RPC.
+func (m *manager) OnCallBegin() error {
+	if m.isClosed() {
+		return nil
+	}
+
+	if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
+		// Channel is not idle now. Set the activity bit and allow the call.
+		atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
+		return nil
+	}
+
+	// Channel is either in idle mode or is in the process of moving to idle
+	// mode. Attempt to exit idle mode to allow this RPC.
+	if err := m.exitIdleMode(); err != nil {
+		// Undo the increment to calls count, and return an error causing the
+		// RPC to fail.
+		atomic.AddInt32(&m.activeCallsCount, -1)
+		return err
+	}
+
+	atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
+	return nil
+}
+
+// exitIdleMode instructs the channel to exit idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
+func (m *manager) exitIdleMode() error {
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+
+	if !m.actuallyIdle {
+		// This can happen in two scenarios:
+		// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
+		//   tryEnterIdleMode(). But before the latter could grab the lock, an RPC
+		//   came in and OnCallBegin() noticed that the calls count is negative.
+		// - Channel is in idle mode, and multiple new RPCs come in at the same
+		//   time, all of them notice a negative calls count in OnCallBegin and get
+		//   here. The first one to get the lock gets the channel to exit idle.
+		//
+		// Either way, nothing to do here.
+		return nil
+	}
+
+	if err := m.enforcer.ExitIdleMode(); err != nil {
+		return fmt.Errorf("channel failed to exit idle mode: %v", err)
+	}
+
+	// Undo the idle entry process. This also respects any new RPC attempts.
+	atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+	m.actuallyIdle = false
+
+	// Start a new timer to fire after the configured idle timeout.
+	m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout)
+	return nil
+}
+
+// OnCallEnd is invoked at the end of every RPC.
+func (m *manager) OnCallEnd() {
+	if m.isClosed() {
+		return
+	}
+
+	// Record the time at which the most recent call finished.
+	atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano())
+
+	// Decrement the active calls count.
This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. + atomic.AddInt32(&m.activeCallsCount, -1) +} + +func (m *manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 +} + +func (m *manager) Close() { + atomic.StoreInt32(&m.closed, 1) + + m.idleMu.Lock() + m.timer.Stop() + m.timer = nil + m.idleMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index fd0ee3dc..c8a8c76d 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -30,7 +30,7 @@ import ( var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,8 +38,12 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second // ParseServiceConfig parses a JSON representation of the service config. - ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + ParseServiceConfig any // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the @@ -49,44 +53,81 @@ var ( // given name. This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. - GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + GetCertificateProviderBuilder any // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString any // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. 
An // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. - DrainServerTransports interface{} // func(*grpc.Server, string) + DrainServerTransports any // func(*grpc.Server, string) // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. - AddGlobalServerOptions interface{} // func(opt ...ServerOption) + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalServerOptions any // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalServerOptions func() // AddGlobalDialOptions adds an array of DialOption that will be effective // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. - AddGlobalDialOptions interface{} // func(opt ...DialOption) + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions any // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + DisableGlobalDialOptions any // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. - JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. - WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. - BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -97,7 +138,7 @@ var ( // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. 
-	NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error)
+	NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)

 	// RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster
 	// Specifier Plugin for testing purposes, regardless of the XDSRLS environment
@@ -127,6 +168,13 @@ var (
 	//
 	// TODO: Remove this function once the RBAC env var is removed.
 	UnregisterRBACHTTPFilterForTesting func()
+
+	// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
+	ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)
+
+	// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
+	// metadata to RPCs.
+	GRPCResolverSchemeExtraMetadata string = "xds"
 )

 // HealthChecker defines the signature of the client-side LB channel health checking function.
@@ -137,7 +185,7 @@ var (
 //
 // The health checking protocol is defined at:
 // https://github.com/grpc/grpc/blob/master/doc/health-checking.md
-type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error
+type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error

 const (
 	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
index b2980f8a..900bfb71 100644
--- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata")

 type mdValue metadata.MD

-func (m mdValue) Equal(o interface{}) bool {
+func (m mdValue) Equal(o any) bool {
 	om, ok := o.(mdValue)
 	if !ok {
 		return false
@@ -76,33 +76,11 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address {
 	return addr
 }

-// Validate returns an error if the input md contains invalid keys or values.
-//
-// If the header is not a pseudo-header, the following items are checked:
-//  - header names must contain one or more characters from this set [0-9 a-z _ - .].
-//  - if the header-name ends with a "-bin" suffix, no validation of the header value is performed.
-//  - otherwise, the header value must contain one or more characters from the set [%x20-%x7E].
+// Validate validates every pair in md with ValidatePair.
 func Validate(md metadata.MD) error {
 	for k, vals := range md {
-		// pseudo-header will be ignored
-		if k[0] == ':' {
-			continue
-		}
-		// check key, for i that saving a conversion if not using for range
-		for i := 0; i < len(k); i++ {
-			r := k[i]
-			if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
-				return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k)
-			}
-		}
-		if strings.HasSuffix(k, "-bin") {
-			continue
-		}
-		// check value
-		for _, val := range vals {
-			if hasNotPrintable(val) {
-				return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k)
-			}
+		if err := ValidatePair(k, vals...); err != nil {
+			return err
 		}
 	}
 	return nil
@@ -118,3 +96,37 @@ func hasNotPrintable(msg string) bool {
 	}
 	return false
}
+
+// ValidatePair validates a key-value pair with the following rules (the
+// pseudo-header will be skipped):
+//
+//   - key must contain one or more characters.
+//   - the characters in the key must be contained in [0-9 a-z _ - .].
+//   - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
+//   - the characters in every value must be printable (in [%x20-%x7E]).
+func ValidatePair(key string, vals ...string) error {
+	// key should not be empty
+	if key == "" {
+		return fmt.Errorf("there is an empty key in the header")
+	}
+	// pseudo-header will be ignored
+	if key[0] == ':' {
+		return nil
+	}
+	// check key; index the string directly to save the rune conversion a
+	// for-range loop would incur
+	for i := 0; i < len(key); i++ {
+		r := key[i]
+		if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+			return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
+		}
+	}
+	if strings.HasSuffix(key, "-bin") {
+		return nil
+	}
+	// check value
+	for _, val := range vals {
+		if hasNotPrintable(val) {
+			return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
+		}
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
index 0177af4b..70331913 100644
--- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go
+++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -35,7 +35,7 @@ const jsonIndent = "  "
 // ToJSON marshals the input into a json string.
 //
 // If marshal fails, it falls back to fmt.Sprintf("%+v").
-func ToJSON(e interface{}) string {
+func ToJSON(e any) string {
 	switch ee := e.(type) {
 	case protov1.Message:
 		mm := jsonpb.Marshaler{Indent: jsonIndent}
diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
index c7a18a94..f0603871 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
@@ -92,7 +92,7 @@ type ClientStream interface {
 	// calling RecvMsg on the same stream at the same time, but it is not safe
 	// to call SendMsg on the same stream in different goroutines. It is also
 	// not safe to call CloseSend concurrently with SendMsg.
-	SendMsg(m interface{}) error
+	SendMsg(m any) error
 	// RecvMsg blocks until it receives a message into m or the stream is
 	// done. It returns io.EOF when the stream completes successfully. On
 	// any other error, the stream is aborted and the error contains the RPC
@@ -101,7 +101,7 @@ type ClientStream interface {
 	// It is safe to have a goroutine calling SendMsg and another goroutine
 	// calling RecvMsg on the same stream at the same time, but it is not
 	// safe to call RecvMsg on the same stream in different goroutines.
-	RecvMsg(m interface{}) error
+	RecvMsg(m any) error
 }

 // ClientInterceptor is an interceptor for gRPC client streams.
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index 75301c51..99e1e5b3 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -62,7 +62,8 @@ const (
 	defaultPort       = "443"
 	defaultDNSSvrPort = "53"
 	golang            = "GO"
-	// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
+	// txtPrefix is the prefix string to be prepended to the host name for txt
+	// record lookup.
 	txtPrefix = "_grpc_config."
 	// In DNS, service config is encoded in a TXT record via the mechanism
 	// described in RFC-1464 using the attribute name grpc_config.
@@ -86,14 +87,14 @@ var (
 	minDNSResRate = 30 * time.Second
 )

-var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
-	return func(ctx context.Context, network, address string) (net.Conn, error) {
+var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
+	return func(ctx context.Context, network, _ string) (net.Conn, error) {
 		var dialer net.Dialer
-		return dialer.DialContext(ctx, network, authority)
+		return dialer.DialContext(ctx, network, address)
 	}
 }

-var customAuthorityResolver = func(authority string) (netResolver, error) {
+var newNetResolver = func(authority string) (netResolver, error) {
 	host, port, err := parseTarget(authority, defaultDNSSvrPort)
 	if err != nil {
 		return nil, err
@@ -103,7 +104,7 @@ var customAuthorityResolver = func(authority string) (netResolver, error) {

 	return &net.Resolver{
 		PreferGo: true,
-		Dial:     customAuthorityDialler(authorityWithPort),
+		Dial:     addressDialer(authorityWithPort),
 	}, nil
 }

@@ -114,9 +115,10 @@ func NewBuilder() resolver.Builder {

 type dnsBuilder struct{}

-// Build creates and starts a DNS resolver that watches the name resolution of the target.
+// Build creates and starts a DNS resolver that watches the name resolution of
+// the target.
 func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
-	host, port, err := parseTarget(target.Endpoint, defaultPort)
+	host, port, err := parseTarget(target.Endpoint(), defaultPort)
 	if err != nil {
 		return nil, err
 	}
@@ -140,10 +142,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
 		disableServiceConfig: opts.DisableServiceConfig,
 	}

-	if target.Authority == "" {
+	if target.URL.Host == "" {
 		d.resolver = defaultResolver
 	} else {
-		d.resolver, err = customAuthorityResolver(target.Authority)
+		d.resolver, err = newNetResolver(target.URL.Host)
 		if err != nil {
 			return nil, err
 		}
@@ -180,19 +182,22 @@ type dnsResolver struct {
 	ctx    context.Context
 	cancel context.CancelFunc
 	cc     resolver.ClientConn
-	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	// rn channel is used by ResolveNow() to force an immediate resolution of the
+	// target.
 	rn chan struct{}
-	// wg is used to enforce Close() to return after the watcher() goroutine has finished.
-	// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
-	// replace the real lookup functions with mocked ones to facilitate testing.
-	// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
-	// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
-	// has data race with replaceNetFunc (WRITE the lookup function pointers).
+	// wg is used to enforce Close() to return after the watcher() goroutine has
+	// finished. Otherwise, a data race is possible. [Race Example] In
+	// dns_resolver_test we replace the real lookup functions with mocked ones to
+	// facilitate testing. If Close() doesn't wait for the watcher() goroutine to
+	// finish, the race detector sometimes warns that lookup (READ of the lookup
+	// function pointers) inside the watcher() goroutine races with
+	// replaceNetFunc (WRITE of the lookup function pointers).
 	wg                   sync.WaitGroup
 	disableServiceConfig bool
 }

-// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
+// ResolveNow invokes an immediate resolution of the target that this
+// dnsResolver watches.
 func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
 	select {
 	case d.rn <- struct{}{}:
@@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() {

 		var timer *time.Timer
 		if err == nil {
-			// Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least
-			// to prevent constantly re-resolving.
+			// Success resolving, wait for the next ResolveNow. However, also wait 30
+			// seconds at the very least to prevent constantly re-resolving.
 			backoffIndex = 1
 			timer = newTimerDNSResRate(minDNSResRate)
 			select {
@@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() {
 			case <-d.rn:
 			}
 		} else {
-			// Poll on an error found in DNS Resolver or an error received from ClientConn.
+			// Poll on an error found in DNS Resolver or an error received from
+			// ClientConn.
 			timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex))
 			backoffIndex++
 		}
@@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
 }

 func handleDNSError(err error, lookupType string) error {
-	if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+	dnsErr, ok := err.(*net.DNSError)
+	if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
 		// Timeouts and temporary errors should be communicated to gRPC to
 		// attempt another DNS query (with backoff). Other errors should be
 		// suppressed (they may represent the absence of a TXT record).
@@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult {
 		res += s
 	}

-	// TXT record must have "grpc_config=" attribute in order to be used as service config.
+	// TXT record must have "grpc_config=" attribute in order to be used as
+	// service config.
 	if !strings.HasPrefix(res, txtAttribute) {
 		logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
-		// This is not an error; it is the equivalent of not having a service config.
+		// This is not an error; it is the equivalent of not having a service
+		// config.
 		return nil
 	}
 	sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
@@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
 	return &state, nil
 }

-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
+// formatIP returns ok = false if addr is not a valid textual representation of
+// an IP address. If addr is an IPv4 address, return the addr and ok = true.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and
+// ok = true.
 func formatIP(addr string) (addrIP string, ok bool) {
 	ip := net.ParseIP(addr)
 	if ip == nil {
@@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) {
 	return "[" + addr + "]", true
 }

-// parseTarget takes the user input target string and default port, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
-// are stripped when setting the host.
+// parseTarget takes the user input target string and default port, returns
+// formatted host and port info.
If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. // examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. return "", "", errEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 520d9229..afac5657 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, @@ -45,7 +52,7 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 7f1a702c..16091168 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -34,8 +34,8 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } // gRPC was parsing the dial target manually before PR #4817, and we diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 00000000..11d82afc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. + const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. 
+ const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index b0ead4f5..4cf85cad 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -49,7 +49,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -64,7 +64,7 @@ func Err(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -120,11 +120,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { +func (s *Status) Details() []any { if s == nil || s.s == nil { return nil } - details := make([]interface{}, 0, len(s.s.Details)) + details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 409769f4..b330cced 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -22,6 +22,7 @@ import ( "bytes" "errors" "fmt" + "net" "runtime" "strconv" "sync" @@ -29,6 +30,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/status" ) @@ -38,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { } type itemNode struct { - it interface{} + it any next *itemNode } @@ -47,7 +49,7 @@ type itemList struct { tail *itemNode } -func (il *itemList) enqueue(i interface{}) { +func (il *itemList) enqueue(i any) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n @@ -59,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) { // peek returns the first item in the list without removing it from the // list. 
-func (il *itemList) peek() interface{} { +func (il *itemList) peek() any { return il.head.it } -func (il *itemList) dequeue() interface{} { +func (il *itemList) dequeue() any { if il.head == nil { return nil } @@ -191,7 +193,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -209,6 +211,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -326,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -363,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b } // Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() @@ -377,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo return true, nil } -func (c *controlBuffer) get(block bool) (interface{}, error) { +func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() if c.err != nil { @@ -408,7 +418,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -478,12 +488,14 @@ type loopyWriter struct { hEnc *hpack.Encoder // HPACK encoder. bdpEst *bdpEstimator draining bool + conn net.Conn + logger *grpclog.PrefixLogger // Side-specific handlers ssGoAwayHandler func(*goAway) (bool, error) } -func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter { var buf bytes.Buffer l := &loopyWriter{ side: s, @@ -496,6 +508,8 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato hBuf: &buf, hEnc: hpack.NewEncoder(&buf), bdpEst: bdpEst, + conn: conn, + logger: logger, } return l } @@ -513,23 +527,26 @@ const minBatchSize = 1000 // 2. Stream level flow control quota available. // // In each iteration of run loop, other than processing the incoming control -// frame, loopy calls processData, which processes one node from the activeStreams linked-list. -// This results in writing of HTTP2 frames into an underlying write buffer. -// When there's no more control frames to read from controlBuf, loopy flushes the write buffer. 
-// As an optimization, to increase the batch size for each flush, loopy yields the processor, once -// if the batch size is too low to give stream goroutines a chance to fill it up. +// frame, loopy calls processData, which processes one node from the +// activeStreams linked-list. This results in writing of HTTP2 frames into an +// underlying write buffer. When there's no more control frames to read from +// controlBuf, loopy flushes the write buffer. As an optimization, to increase +// the batch size for each flush, loopy yields the processor, once if the batch +// size is too low to give stream goroutines a chance to fill it up. +// +// Upon exiting, if the error causing the exit is not an I/O error, run() +// flushes and closes the underlying connection. Otherwise, the connection is +// left open to allow the I/O error to be encountered by the reader instead. func (l *loopyWriter) run() (err error) { defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. %v", err) - } - err = nil + if l.logger.V(logLevel) { + l.logger.Infof("loopyWriter exiting with error: %v", err) } + if !isIOError(err) { + l.framer.writer.Flush() + l.conn.Close() + } + l.cbuf.finish() }() for { it, err := l.cbuf.get(true) @@ -574,7 +591,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -583,11 +599,11 @@ func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) } -func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) { // Otherwise update the quota. if w.streamID == 0 { l.sendQuota += w.increment - return nil + return } // Find the stream and update it. 
if str, ok := l.estdStreams[w.streamID]; ok { @@ -595,10 +611,9 @@ func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { str.state = active l.activeStreams.enqueue(str) - return nil + return } } - return nil } func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { @@ -606,13 +621,11 @@ func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { } func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { - if err := l.applySettings(s.ss); err != nil { - return err - } + l.applySettings(s.ss) return l.framer.fr.WriteSettingsAck() } -func (l *loopyWriter) registerStreamHandler(h *registerStream) error { +func (l *loopyWriter) registerStreamHandler(h *registerStream) { str := &outStream{ id: h.streamID, state: empty, @@ -620,15 +633,14 @@ func (l *loopyWriter) registerStreamHandler(h *registerStream) error { wq: h.wq, } l.estdStreams[h.streamID] = str - return nil } func (l *loopyWriter) headerHandler(h *headerFrame) error { if l.side == serverSide { str, ok := l.estdStreams[h.streamID] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: loopy doesn't recognize the stream: %d", h.streamID) + if l.logger.V(logLevel) { + l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID) } return nil } @@ -655,19 +667,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { itl: &itemList{}, wq: h.wq, } - str.itl.enqueue(h) - return l.originateStream(str) + return l.originateStream(str, h) } -func (l *loopyWriter) originateStream(str *outStream) error { - hdr := str.itl.dequeue().(*headerFrame) - if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err - } - // Other errors(errStreamDrain) need not close transport. +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway. In which case, we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. + hdr.onOrphaned(errStreamDrain) return nil } + if err := hdr.initStream(str.id); err != nil { + return err + } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } @@ -682,8 +695,8 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He l.hBuf.Reset() for _, f := range hf { if err := l.hEnc.WriteField(f); err != nil { - if logger.V(logLevel) { - logger.Warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err) + if l.logger.V(logLevel) { + l.logger.Warningf("Encountered error while encoding headers: %v", err) } } } @@ -721,10 +734,10 @@ func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.He return nil } -func (l *loopyWriter) preprocessData(df *dataFrame) error { +func (l *loopyWriter) preprocessData(df *dataFrame) { str, ok := l.estdStreams[df.streamID] if !ok { - return nil + return } // If we got data for a stream it means that // stream was originated and the headers were sent out. 
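The reflowed run() comment above describes loopyWriter's batching strategy: keep writing control frames while more are queued, yield the processor once when the pending batch is small so stream goroutines can top it up, and flush only when the queue goes empty. The sketch below is a minimal, self-contained Go illustration of that pattern only; writeLoop, the []byte channel, and the minBatchSize parameter are simplified stand-ins for the vendored controlBuf/framer machinery, not the gRPC API.

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"runtime"
)

// writeLoop batches writes the way loopy's run() comment describes: write
// while work is queued, yield once if the batch is small, and flush only
// when the queue is empty.
func writeLoop(ch chan []byte, w *bufio.Writer, minBatchSize int) error {
	for it := range ch { // block until there is work
	batching:
		for {
			if _, err := w.Write(it); err != nil {
				return err
			}
			select {
			case next, ok := <-ch:
				if !ok {
					break batching // producer closed the queue
				}
				it = next // more items ready: keep batching
			default:
				if w.Buffered() < minBatchSize {
					// Small batch: yield once so producer goroutines can
					// enqueue more before we pay for a flush.
					runtime.Gosched()
					select {
					case next, ok := <-ch:
						if ok {
							it = next
							continue
						}
					default:
					}
				}
				break batching
			}
		}
		// Queue drained (or closed): flush the accumulated batch.
		if err := w.Flush(); err != nil {
			return err
		}
	}
	return w.Flush()
}

func main() {
	ch := make(chan []byte, 8)
	for _, s := range []string{"frame1 ", "frame2 ", "frame3"} {
		ch <- []byte(s)
	}
	close(ch)

	var out bytes.Buffer
	w := bufio.NewWriter(&out)
	if err := writeLoop(ch, w, 1000); err != nil {
		fmt.Println("write error:", err)
	}
	fmt.Println(out.String()) // frame1 frame2 frame3
}
```

In the vendored code the queue is the controlBuffer and writes go through the HTTP/2 framer, but the flush-when-empty and yield-once-on-small-batch structure is the same.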
@@ -733,7 +746,6 @@ func (l *loopyWriter) preprocessData(df *dataFrame) error { str.state = active l.activeStreams.enqueue(str) } - return nil } func (l *loopyWriter) pingHandler(p *ping) error { @@ -744,9 +756,8 @@ func (l *loopyWriter) pingHandler(p *ping) error { } -func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { o.resp <- l.sendQuota - return nil } func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { @@ -763,8 +774,9 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return err } } - if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -799,7 +811,8 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + // Flush and close the connection; we are done with it. + return errors.New("received GOAWAY with no active streams") } } return nil @@ -817,10 +830,10 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: - return l.incomingWindowUpdateHandler(i) + l.incomingWindowUpdateHandler(i) case *outgoingWindowUpdate: return l.outgoingWindowUpdateHandler(i) case *incomingSettings: @@ -830,7 +843,7 @@ func (l *loopyWriter) handle(i interface{}) error { case *headerFrame: return l.headerHandler(i) case *registerStream: - return l.registerStreamHandler(i) + l.registerStreamHandler(i) case *cleanupStream: return l.cleanupStreamHandler(i) case *earlyAbortStream: @@ -838,19 +851,24 @@ func (l *loopyWriter) handle(i interface{}) error { case *incomingGoAway: return l.incomingGoAwayHandler(i) case *dataFrame: - return l.preprocessData(i) + l.preprocessData(i) case *ping: return l.pingHandler(i) case *goAway: return l.goAwayHandler(i) case *outFlowControlSizeRequest: - return l.outFlowControlSizeRequestHandler(i) + l.outFlowControlSizeRequestHandler(i) + case closeConnection: + // Just return a non-I/O error and run() will flush and close the + // connection. 
+		return ErrConnClosing
 	default:
 		return fmt.Errorf("transport: unknown control message type %T", i)
 	}
+	return nil
 }

-func (l *loopyWriter) applySettings(ss []http2.Setting) error {
+func (l *loopyWriter) applySettings(ss []http2.Setting) {
 	for _, s := range ss {
 		switch s.ID {
 		case http2.SettingInitialWindowSize:
@@ -869,7 +887,6 @@
 			updateHeaderTblSize(l.hEnc, s.Val)
 		}
 	}
-	return nil
 }

 // processData removes the first stream from active streams, writes out at most 16KB
@@ -903,7 +920,7 @@ func (l *loopyWriter) processData() (bool, error) {
 			return false, err
 		}
 		if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
-			return false, nil
+			return false, err
 		}
 	} else {
 		l.activeStreams.enqueue(str)
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
index 9fa306b2..bc8ee074 100644
--- a/vendor/google.golang.org/grpc/internal/transport/defaults.go
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
@@ -47,3 +47,9 @@ const (
 	defaultClientMaxHeaderListSize = uint32(16 << 20)
 	defaultServerMaxHeaderListSize = uint32(16 << 20)
 )
+
+// MaxStreamID is the upper bound for the stream ID before the current
+// transport gracefully closes and a new transport is created for subsequent
+// RPCs. This is set to 75% of 2^31-1. Streams are identified with an unsigned
+// 31-bit integer. It's exported so that tests can override it.
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index fb272235..98f80e3f 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -39,6 +39,7 @@ import (
 	"golang.org/x/net/http2"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/grpcutil"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
@@ -46,24 +47,32 @@ import (
 	"google.golang.org/grpc/status"
 )

-// NewServerHandlerTransport returns a ServerTransport handling gRPC
-// from inside an http.Handler. It requires that the http Server
-// supports HTTP/2.
+// NewServerHandlerTransport returns a ServerTransport handling gRPC from
+// inside an http.Handler, or writes an HTTP error to w and returns an error.
+// It requires that the http Server supports HTTP/2.
 func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
 	if r.ProtoMajor != 2 {
-		return nil, errors.New("gRPC requires HTTP/2")
+		msg := "gRPC requires HTTP/2"
+		http.Error(w, msg, http.StatusBadRequest)
+		return nil, errors.New(msg)
 	}
 	if r.Method != "POST" {
-		return nil, errors.New("invalid gRPC request method")
+		msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
+		http.Error(w, msg, http.StatusBadRequest)
+		return nil, errors.New(msg)
 	}
 	contentType := r.Header.Get("Content-Type")
 	// TODO: do we assume contentType is lowercase?
we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -75,11 +84,14 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s contentSubtype: contentSubtype, stats: stats, } + st.logger = prefixLoggerForServerHandlerTransport(st) if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +109,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -138,15 +152,19 @@ type serverHandlerTransport struct { // TODO make sure this is consistent across handler_server and http2_server contentSubtype string - stats []stats.Handler + stats []stats.Handler + logger *grpclog.PrefixLogger } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -236,7 +254,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -346,7 +364,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req @@ -435,7 +453,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain() { +func (ht *serverHandlerTransport) Drain(debugData string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index d518b07e..badab8ac 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -38,6 +38,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials 
"google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" @@ -59,11 +60,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -136,12 +141,12 @@ type http2Client struct { channelzID *channelz.Identifier czData *channelzData - onGoAway func(GoAwayReason) - onClose func() + onClose func(GoAwayReason) bufferPool *bufferPool connectionID uint64 + logger *grpclog.PrefixLogger } func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { @@ -193,7 +198,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. -func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -213,7 +218,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) } // Any further errors will close the underlying connection @@ -238,8 +243,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func(conn net.Conn) { defer ctxMonitorDone.Fire() // Signal this goroutine has exited. <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. - if connectCtx.Err() != nil { + if err := connectCtx.Err(); err != nil { // connectCtx expired before exiting the function. Hard close the connection. 
+ if logger.V(logLevel) { + logger.Infof("Aborting due to connect deadline expiring: %v", err) + } conn.Close() } }(conn) @@ -314,6 +322,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts cancel: cancel, userAgent: opts.UserAgent, registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -321,7 +330,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -335,11 +344,11 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), - onGoAway: onGoAway, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), onClose: onClose, } + t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -438,17 +447,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts return nil, err } go func() { - t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) - err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } - } - // Do not close the transport. Let reader goroutine handle it since - // there might be data in the buffers. - t.conn.Close() - t.controlBuf.finish() + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy.run() close(t.writerDone) }() return t, nil @@ -702,6 +702,18 @@ func (e NewStreamError) Error() string { // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -726,15 +738,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, endStream: false, initStream: func(id uint32) error { t.mu.Lock() - if state := t.state; state != reachable { + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { t.mu.Unlock() - // Do a quick cleanup. 
- err := error(errStreamDrain) - if state == closing { - err = ErrConnClosing - } - cleanup(err) - return err + cleanup(ErrConnClosing) + return ErrConnClosing } if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) @@ -752,7 +761,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, } firstTry := true var ch chan struct{} - checkForStreamQuota := func(it interface{}) bool { + transportDrainRequired := false + checkForStreamQuota := func(it any) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -767,10 +777,15 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, h := it.(*headerFrame) h.streamID = t.nextID t.nextID += 2 + + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() - if t.activeStreams == nil { // Can be niled from Close(). + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). t.mu.Unlock() return false // Don't create a stream if the transport is already closed. } @@ -785,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -800,7 +815,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + success, err := t.controlBuf.executeAndPut(func(it any) bool { return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { @@ -846,6 +861,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, sh.HandleRPC(s.ctx, outHeader) } } + if transportDrainRequired { + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") + } + t.GracefulClose() + } return s, nil } @@ -906,7 +927,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func(any) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -934,9 +955,14 @@ func (t *http2Client) Close(err error) { t.mu.Unlock() return } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } // Call t.onClose ASAP to prevent the client from attempting to create new // streams. - t.onClose() + if t.state != draining { + t.onClose(GoAwayInvalid) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -986,11 +1012,15 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") + } + t.onClose(GoAwayInvalid) t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) @@ -1050,7 +1080,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. 
func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(interface{}) bool { + updateIWS := func(any) bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1147,8 +1177,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { } statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) } statusCode = codes.Unknown } @@ -1203,7 +1233,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -1230,10 +1260,12 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { t.mu.Unlock() return } - if f.ErrCode == http2.ErrCodeEnhanceYourCalm { - if logger.V(logLevel) { - logger.Infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") - } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" { + // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug + // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is + // enabled by default and double the configured KEEPALIVE_TIME used for new connections + // on that channel. + logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".") } id := f.LastStreamID if id > 0 && id%2 == 0 { @@ -1266,8 +1298,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. - t.onGoAway(t.goAwayReason) - t.state = draining + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. @@ -1303,7 +1337,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason @@ -1471,14 +1505,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - isHeader := false - - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - if !endStream { - // HEADERS frame block carries a Response-Headers. - isHeader = true + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). 
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. @@ -1486,15 +1521,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(mdata) > 0 { s.header = mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) } - close(s.headerChan) } for _, sh := range t.statsHandlers { - if isHeader { + if !endStream { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), @@ -1520,9 +1552,10 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { statusGen = status.New(rawStatusCode, grpcMessage) } - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. + rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true) } // readServerPreface reads and handles the initial settings frame from the @@ -1756,3 +1789,9 @@ func (t *http2Client) getOutFlowWindow() int64 { return -2 } } + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 3dd15647..c06db679 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,6 +21,7 @@ package transport import ( "bytes" "context" + "errors" "fmt" "io" "math" @@ -34,13 +35,16 @@ import ( "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -101,13 +105,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. 
+ drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. @@ -127,6 +131,8 @@ type http2Server struct { // This lock may not be taken if mu is already held. maxStreamMu sync.Mutex maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger } // NewServerTransport creates a http2 transport with conn and configuration @@ -159,21 +165,16 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - maxStreams := config.MaxStreams - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { + if config.MaxStreams != math.MaxUint32 { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, + Val: config.MaxStreams, }) } dynamicWindow := true @@ -232,7 +233,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, kp.Timeout = defaultServerKeepaliveTimeout } if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } } @@ -252,7 +253,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), - maxStreams: maxStreams, + maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, @@ -265,6 +266,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, czData: new(channelzData), bufferPool: newBufferPool(), } + t.logger = prefixLoggerForServerTransport(t) // Add peer information to the http2server context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -293,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -329,23 +331,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.handleSettings(sf) go func() { - t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } - } - t.conn.Close() - t.controlBuf.finish() + t.loopy.run() close(t.writerDone) }() go t.keepalive() return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. 
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -361,15 +358,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - return true + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } t.maxStreamID = streamID @@ -381,13 +375,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( fc: &inFlow{limit: uint32(t.initialWindowSize)}, } var ( - // If a gRPC Response-Headers has already been received, then it means - // that the peer is speaking gRPC and we are in gRPC mode. - isGRPC = false - mdata = make(map[string][]string) - httpMethod string - // headerError is set if an error is encountered while parsing the headers - headerError bool + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(metadata.MD, len(frame.Fields)) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status timeoutSet bool timeout time.Duration @@ -398,11 +393,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( case "content-type": contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) if !validContentType { + contentType = hf.Value break } mdata[hf.Name] = append(mdata[hf.Name], hf.Value) s.contentSubtype = contentSubtype isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors case "grpc-encoding": s.recvCompress = hf.Value case ":method": @@ -413,23 +420,23 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( timeoutSet = true var err error if timeout, err = decodeTimeout(hf.Value); err != nil { - headerError = true + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) } // "Transports must consider requests containing the Connection header // as malformed." 
- A41 case "connection": - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") } - headerError = true + protocolError = true default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break } v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { - headerError = true - logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } mdata[hf.Name] = append(mdata[hf.Name], v) @@ -443,27 +450,47 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // error, this takes precedence over a client not speaking gRPC. if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) - if logger.V(logLevel) { - logger.Errorf("transport: %v", errMsg) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 400, + httpStatus: http.StatusBadRequest, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), rst: !frame.StreamEnded(), }) - return false + return nil } - if !isGRPC || headerError { + if protocolError { t.controlBuf.put(&cleanupStream{ streamID: streamID, rst: true, rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil } // "If :authority is missing, Host must be renamed to :authority." 
- A41 @@ -503,7 +530,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -514,13 +541,13 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } if httpMethod != http.MethodPost { t.mu.Unlock() - errMsg := fmt.Sprintf("http2Server.operateHeaders parsed a :method field: %v which should be POST", httpMethod) - if logger.V(logLevel) { - logger.Infof("transport: %v", errMsg) + errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ httpStatus: 405, @@ -530,14 +557,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { t.mu.Unlock() - if logger.V(logLevel) { - logger.Infof("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) } stat, ok := status.FromError(err) if !ok { @@ -550,7 +577,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: stat, rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -574,7 +601,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( LocalAddr: t.localAddr, Compression: s.recvCompress, WireLength: int(frame.Header().Length), - Header: metadata.MD(mdata).Copy(), + Header: mdata.Copy(), } sh.HandleRPC(s.ctx, inHeader) } @@ -597,7 +624,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -611,8 +638,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) if err != nil { if se, ok := err.(http2.StreamError); ok { - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) } t.mu.Lock() s := t.activeStreams[se.StreamID] @@ -630,19 +657,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -658,8 +682,8 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. case *http2.GoAwayFrame: // TODO: Handle GoAway from the client appropriately. 
default: - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) } } } @@ -826,7 +850,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -843,8 +867,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -886,10 +910,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -913,7 +934,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -921,8 +942,8 @@ func (t *http2Server) checkForHeaderListSize(it interface{}) bool { var sz int64 for _, f := range hdrFrame.hf { if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { - if logger.V(logLevel) { - logger.Errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) + if t.logger.V(logLevel) { + t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize) } return false } @@ -1035,7 +1056,7 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. - logger.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err) + t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) } @@ -1140,20 +1161,20 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.Drain() + t.Drain("max_idle") return } idleTimer.Reset(val) case <-ageTimer.C: - t.Drain() + t.Drain("max_age") ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: // Close the connection after grace period. 
- if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to maximum connection age.") + if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1169,10 +1190,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1199,20 +1217,23 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil t.mu.Unlock() t.controlBuf.finish() close(t.done) - if err := t.conn.Close(); err != nil && logger.V(logLevel) { - logger.Infof("transport: error closing conn during Close: %v", err) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } channelz.RemoveEntry(t.channelzID) // Cancel all active streams. @@ -1292,14 +1313,14 @@ func (t *http2Server) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Server) Drain() { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) + t.drainEvent = grpcsync.NewEvent() + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1319,19 +1340,17 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { - // Abruptly close the connection following the GoAway (via - // loopywriter). But flush out what's inside the buffer first. - t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + if retErr != nil { + return false, retErr } return true, nil } @@ -1343,7 +1362,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. 
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { @@ -1353,7 +1372,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 2c601a86..19581400 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -21,6 +21,7 @@ package transport import ( "bufio" "encoding/base64" + "errors" "fmt" "io" "math" @@ -29,6 +30,7 @@ import ( "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" @@ -37,7 +39,6 @@ import ( "golang.org/x/net/http2/hpack" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" ) @@ -85,7 +86,6 @@ var ( // 504 Gateway timeout - UNAVAILABLE. http.StatusGatewayTimeout: codes.Unavailable, } - logger = grpclog.Component("transport") ) // isReservedHeader checks whether hdr belongs to HTTP2 headers @@ -310,6 +310,7 @@ func decodeGrpcMessageUnchecked(msg string) string { } type bufWriter struct { + pool *sync.Pool buf []byte offset int batchSize int @@ -317,12 +318,17 @@ type bufWriter struct { err error } -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ batchSize: batchSize, conn: conn, + pool: pool, } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) + } + return w } func (w *bufWriter) Write(b []byte) (n int, err error) { @@ -330,7 +336,12 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { return 0, w.err } if w.batchSize == 0 { // Buffer has been disabled. 
- return w.conn.Write(b) + n, err = w.conn.Write(b) + return n, toIOError(err) + } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) @@ -338,13 +349,24 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { w.offset += nn n += nn if w.offset >= w.batchSize { - err = w.Flush() + err = w.flushKeepBuffer() } } return n, err } func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } @@ -352,16 +374,39 @@ func (w *bufWriter) Flush() error { return nil } _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) w.offset = 0 return w.err } +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + type framer struct { writer *bufWriter fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -369,7 +414,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList if readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - w := newBufWriter(conn, writeBufferSize) + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -383,6 +432,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList return f } +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + // parseDialTarget returns the network and address to pass to dialer. func parseDialTarget(target string) (string, string) { net := "tcp" diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go new file mode 100644 index 00000000..42ed2b07 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 2e615ee2..74a811fc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -43,10 +43,6 @@ import ( "google.golang.org/grpc/tap" ) -// ErrNoHeaders is used as a signal that a trailers only response was received, -// and is not a real error. -var ErrNoHeaders = errors.New("stream has no headers") - const logLevel = 2 type bufferPool struct { @@ -56,7 +52,7 @@ type bufferPool struct { func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -257,6 +253,9 @@ type Stream struct { fc *inFlow wq *writeQuota + // Holds compressor names passed in grpc-accept-encoding metadata from the + // client. This is empty for the client side stream. + clientAdvertisedCompressors string // Callback to state application's intentions to read data. This // is used to adjust flow control, if needed. requestRead func(int) @@ -345,8 +344,24 @@ func (s *Stream) RecvCompress() string { } // SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(str string) { - s.sendCompress = str +func (s *Stream) SetSendCompress(name string) error { + if s.isHeaderSent() || s.getState() == streamDone { + return errors.New("transport: set send compressor called after headers sent or stream done") + } + + s.sendCompress = name + return nil +} + +// SendCompress returns the send compressor name. +func (s *Stream) SendCompress() string { + return s.sendCompress +} + +// ClientAdvertisedCompressors returns the compressor names advertised by the +// client via grpc-accept-encoding header. +func (s *Stream) ClientAdvertisedCompressors() string { + return s.clientAdvertisedCompressors } // Done returns a channel which is closed when it receives the final status @@ -371,14 +386,10 @@ func (s *Stream) Header() (metadata.MD, error) { } s.waitOnHeader() - if !s.headerValid { + if !s.headerValid || s.noHeaders { return nil, s.status.Err() } - if s.noHeaders { - return nil, ErrNoHeaders - } - return s.header.Copy(), nil } @@ -540,6 +551,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int + SharedWriteBuffer bool ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 @@ -573,6 +585,8 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. 
ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. @@ -583,8 +597,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) } // Options provides additional hints and information for message @@ -701,13 +715,13 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() + Close(err error) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() @@ -717,7 +731,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. -func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index fb4a88f5..a2cdcaf1 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -91,7 +91,11 @@ func (md MD) Len() int { // Copy returns a copy of md. func (md MD) Copy() MD { - return Join(md) + out := make(MD, len(md)) + for k, v := range md { + out[k] = copyOf(v) + } + return out } // Get obtains the values for a given key. 
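Note on the MD.Copy change just above: the new implementation is a small optimization, preallocating the output map at the source's size and deep-copying each value slice via copyOf instead of delegating to Join. A minimal runnable sketch (not part of the diff; the package layout and output comments are illustrative) of the invariant callers rely on, namely that mutating a copy never leaks into the original:

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	orig := metadata.Pairs("k", "v1")
	cp := orig.Copy()

	// Append grows only the copy's value slice; the original's backing
	// array is untouched because Copy duplicated it.
	cp.Append("k", "v2")

	fmt.Println(orig.Get("k")) // [v1]
	fmt.Println(cp.Get("k"))   // [v1 v2]
}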
@@ -171,8 +175,11 @@ func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) added := make([][]string, len(md.added)+1) copy(added, md.added) - added[len(added)-1] = make([]string, len(kv)) - copy(added[len(added)-1], kv) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) } diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index a5d5516e..236837f4 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -28,26 +28,36 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - blockingCh chan struct{} - picker balancer.Picker + mu sync.Mutex + done bool + idle bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls } -func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{})} +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done { + if pw.done || pw.idle { + // There is a small window where a picker update from the LB policy can + // race with the channel going to idle mode. If the picker is idle here, + // it is because the channel asked it to do so, and therefore it is safe + // to ignore the update from the LB policy. pw.mu.Unlock() return } @@ -58,12 +68,16 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Unlock() } -func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { - acw.mu.Lock() - ac := acw.ac - acw.mu.Unlock() +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. +func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) { + ac := acbw.ac ac.incrCallsStarted() - return func(b balancer.DoneInfo) { + done := result.Done + result.Done = func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { @@ -82,15 +96,16 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. 
-func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { var ch chan struct{} var lastPickErr error + for { pw.mu.Lock() if pw.done { pw.mu.Unlock() - return nil, nil, ErrClientConnClosing + return nil, balancer.PickResult{}, ErrClientConnClosing } if pw.picker == nil { @@ -111,21 +126,34 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, nil, status.Error(codes.Canceled, errStr) + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) } case <-ch: } continue } + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + ch = pw.blockingCh p := pw.picker pw.mu.Unlock() pickResult, err := p.Pick(info) - if err != nil { if err == balancer.ErrNoSubConnAvailable { continue @@ -136,7 +164,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if istatus.IsRestrictedControlPlaneCode(st) { err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) } - return nil, nil, dropError{error: err} + return nil, balancer.PickResult{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -144,19 +172,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, nil, status.Error(codes.Unavailable, err.Error()) + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } - acw, ok := pickResult.SubConn.(*acBalancerWrapper) + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) if !ok { logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) continue } - if t := acw.getAddrConn().getReadyTransport(); t != nil { + if t := acbw.ac.getReadyTransport(); t != nil { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, pickResult.Done), nil + doneChannelzWrapper(acbw, &pickResult) + return t, pickResult, nil } - return t, pickResult.Done, nil + return t, pickResult, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. 
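The pick path above now returns the full balancer.PickResult instead of extracting just its Done callback, which lets doneChannelzWrapper mutate result.Done in place and keeps any other PickResult fields flowing to the caller. A self-contained sketch, using only the public balancer API, of the Picker shape whose result the wrapper threads through (demoPicker and the method name are hypothetical):

package main

import (
	"fmt"

	"google.golang.org/grpc/balancer"
)

// demoPicker always selects the single SubConn it was built with and
// attaches a Done callback to the result.
type demoPicker struct {
	sc balancer.SubConn
}

func (p *demoPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{
		SubConn: p.sc,
		Done: func(di balancer.DoneInfo) {
			fmt.Printf("RPC %s finished, err=%v\n", info.FullMethodName, di.Err)
		},
	}, nil
}

func main() {
	var p balancer.Picker = &demoPicker{} // zero SubConn, purely for illustration
	res, err := p.Pick(balancer.PickInfo{FullMethodName: "/demo.Svc/Method"})
	if err != nil {
		return
	}
	// Done may be nil in general, so callers guard before invoking it,
	// exactly as pick() does above.
	if res.Done != nil {
		res.Done(balancer.DoneInfo{})
	}
}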
@@ -181,6 +210,25 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } +func (pw *pickerWrapper) enterIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.idle = true +} + +func (pw *pickerWrapper) exitIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.blockingCh = make(chan struct{}) + pw.idle = false +} + // dropError is a wrapper error that indicates the LB policy wishes to drop the // RPC and not retry it. type dropError struct { diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index fb7a99e0..2e9cf66b 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -19,15 +19,25 @@ package grpc import ( + "encoding/json" "errors" "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" ) -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" +const ( + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -36,22 +46,55 @@ func newPickfirstBuilder() balancer.Builder { type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b } func (*pickfirstBuilder) Name() string { return PickFirstBalancerName } +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + if !envconfig.PickFirstLBConfig { + // Prior to supporting loadbalancing configuration, the pick_first LB + // policy did not implement the balancer.ConfigParser interface. This + // meant that if a non-empty configuration was passed to it, the service + // config unmarshaling code would throw a warning log, but would + // continue using the pick_first LB policy. The code below ensures the + // same behavior is retained if the env var is not set. 
+ if string(js) != "{}" { + logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) + } + return nil, nil + } + + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger state connectivity.State cc balancer.ClientConn subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -69,28 +112,49 @@ func (b *pickfirstBalancer) ResolverError(err error) { } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - if len(state.ResolverState.Addresses) == 0 { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { - // Remove the old subConn. All addresses were removed, so it is no longer - // valid. - b.cc.RemoveSubConn(b.subConn) + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() b.subConn = nil } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -102,24 +166,29 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.subConn = subConn b.state = connectivity.Idle b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) b.subConn.Connect() return nil } +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. 
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) } if b.subConn != subConn { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") } return } - b.state = state.ConnectivityState if state.ConnectivityState == connectivity.Shutdown { b.subConn = nil return @@ -132,11 +201,21 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, }) case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. + b.subConn.Connect() + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &idlePicker{subConn: subConn}, @@ -147,6 +226,7 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{err: state.ConnectionError}, }) } + b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index cd455478..73bd6336 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -37,7 +37,7 @@ type PreparedMsg struct { } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 99db79fa..a6f26c8a 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . 
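Stepping back to the pickfirstBalancer changes above: once ParseConfig is in effect (gated, per the diff, on the envconfig.PickFirstLBConfig flag), pick_first can be selected and configured through the service config. A hypothetical client-side sketch; the target address and the use of insecure credentials are illustrative only:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The shuffleAddressList field maps to pfConfig.ShuffleAddressList in
	// the diff above, randomizing the order of resolved addresses before
	// pick_first attempts to connect.
	conn, err := grpc.Dial(
		"dns:///example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"pick_first":{"shuffleAddressList":true}}]}`),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}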
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index efcb7f3e..804be887 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -20,7 +20,7 @@ package resolver type addressMapEntry struct { addr Address - value interface{} + value any } // AddressMap is a map of addresses to arbitrary values taking into account @@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { +func (a *AddressMap) Get(addr Address) (value any, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value interface{}) { +func (a *AddressMap) Set(addr Address, value any) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []interface{} { - ret := make([]interface{}, 0, a.Len()) +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 967cbc73..11384e22 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,12 +22,13 @@ package resolver import ( "context" + "fmt" "net" "net/url" + "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -40,8 +41,9 @@ var ( // TODO(bar) install dns resolver in init(){}. -// Register registers the resolver builder to the resolver map. b.Scheme will be -// used as the scheme registered with this builder. +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. If multiple Resolvers are @@ -75,25 +77,6 @@ func GetDefaultScheme() string { return defaultScheme } -// AddressType indicates the address type returned by name resolution. -// -// Deprecated: use Attributes in Address instead. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - // - // Deprecated: use Attributes in Address instead. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - // - // Deprecated: to select the GRPCLB load balancing policy, use a service - // config with a corresponding loadBalancingConfig. To supply balancer - // addresses to the GRPCLB load balancing policy, set State.Attributes - // using balancer/grpclb/state.Set. - GRPCLB -) - // Address represents a server the client connects to. 
// // # Experimental @@ -109,9 +92,6 @@ type Address struct { // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // - // If Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - // // WARNING: ServerName must only be populated with trusted values. It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -122,34 +102,46 @@ type Address struct { Attributes *attributes.Attributes // BalancerAttributes contains arbitrary data about this address intended - // for consumption by the LB policy. These attribes do not affect SubConn + // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. - BalancerAttributes *attributes.Attributes - - // Type is the type of this address. // - // Deprecated: use Attributes instead. - Type AddressType + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata interface{} + Metadata any } // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Type == o.Type && a.Metadata == o.Metadata + a.Metadata == o.Metadata } // String returns JSON formatted string representation of the address. func (a Address) String() string { - return pretty.ToJSON(a) + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() } // BuildOptions includes additional information for the builder to create @@ -178,11 +170,37 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) } +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. 
The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. @@ -202,6 +220,15 @@ type State struct { // gRPC to add new methods to this interface. type ClientConn interface { // UpdateState updates the state of the ClientConn appropriately. + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. UpdateState(State) error // ReportError notifies the ClientConn that the Resolver encountered an // error. The ClientConn will notify the load balancer and begin calling @@ -233,23 +260,7 @@ type ClientConn interface { // target does not contain a scheme or if the parsed scheme is not registered // (i.e. no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. -// -// Examples: -// -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - // Deprecated: use URL.Scheme instead. - Scheme string - // Deprecated: use URL.Host instead. - Authority string - // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when - // the former is empty. - Endpoint string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial @@ -257,6 +268,24 @@ type Target struct { URL url.URL } +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. +func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint", the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can work around this in the unix resolver by + // using the `URL` field.
+ return strings.TrimPrefix(endpoint, "/") +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. @@ -264,8 +293,10 @@ type Builder interface { // gRPC dial calls Build synchronously, and fails if the returned error is // not nil. Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) - // Scheme returns the scheme supported by this resolver. - // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. Scheme() string } @@ -283,10 +314,3 @@ type Resolver interface { // Close closes the resolver. Close() } - -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) -} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 05a9d4e0..d6833056 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,11 +19,11 @@ package grpc import ( + "context" "strings" "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -31,129 +31,200 @@ import ( "google.golang.org/grpc/serviceconfig" ) +// resolverStateUpdater wraps the single method used by ccResolverWrapper to +// report a state update from the actual resolver implementation. +type resolverStateUpdater interface { + updateResolverState(s resolver.State, err error) error +} + // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. type ccResolverWrapper struct { - cc *ClientConn - resolverMu sync.Mutex - resolver resolver.Resolver - done *grpcsync.Event - curState resolver.State + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc resolverStateUpdater + channelzID *channelz.Identifier + ignoreServiceConfig bool + opts ccResolverWrapperOpts + serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. + serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). + + // All incoming (resolver --> gRPC) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the serializer. + // Fields accessed *only* in these serializer callbacks, can therefore be + // accessed without a mutex. + curState resolver.State + + // mu guards access to the below fields. + mu sync.Mutex + closed bool + resolver resolver.Resolver // Accessed only from outgoing calls. +} - incomingMu sync.Mutex // Synchronizes all the incoming calls. +// ccResolverWrapperOpts wraps the arguments to be passed when creating a new +// ccResolverWrapper. +type ccResolverWrapperOpts struct { + target resolver.Target // User specified dial target to resolve. + builder resolver.Builder // Resolver builder to use. + bOpts resolver.BuildOptions // Resolver build options to use. 
+ channelzID *channelz.Identifier // Channelz identifier for the channel. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { +func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { + ctx, cancel := context.WithCancel(context.Background()) ccr := &ccResolverWrapper{ - cc: cc, - done: grpcsync.NewEvent(), - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - rbo := resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - } - - var err error - // We need to hold the lock here while we assign to the ccr.resolver field - // to guard against a data race caused by the following code path, - // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up - // accessing ccr.resolver which is being assigned here. - ccr.resolverMu.Lock() - defer ccr.resolverMu.Unlock() - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + cc: cc, + channelzID: opts.channelzID, + ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + opts: opts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + + // Cannot hold the lock at build time because the resolver can send an + // update or error inline and these incoming calls grab the lock to schedule + // a callback in the serializer. + r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { + cancel() return nil, err } + + // Any error reported by the resolver at build time that leads to a + // re-resolution request from the balancer is dropped by grpc until we + // return from this function. So, we don't have to handle pending resolveNow + // requests here. + ccr.mu.Lock() + ccr.resolver = r + ccr.mu.Unlock() + return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.resolverMu.Lock() - if !ccr.done.HasFired() { - ccr.resolver.ResolveNow(o) + ccr.mu.Lock() + defer ccr.mu.Unlock() + + // ccr.resolver field is set only after the call to Build() returns. But in + // the process of building, the resolver may send an error update which when + // propagated to the balancer may result in a re-resolution request. + if ccr.closed || ccr.resolver == nil { + return } - ccr.resolverMu.Unlock() + ccr.resolver.ResolveNow(o) } func (ccr *ccResolverWrapper) close() { - ccr.resolverMu.Lock() - ccr.resolver.Close() - ccr.done.Fire() - ccr.resolverMu.Unlock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + return + } + + channelz.Info(logger, ccr.channelzID, "Closing the name resolver") + + // Close the serializer to ensure that no more calls from the resolver are + // handled, before actually closing the resolver. + ccr.serializerCancel() + ccr.closed = true + r := ccr.resolver + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-ccr.serializer.Done() + + // Spawn a goroutine to close the resolver (since it may block trying to + // cleanup all allocated resources) and return early. + go r.Close() +} + +// serializerScheduleLocked is a convenience method to schedule a function to be +// run on the serializer while holding ccr.mu. 
+func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { + ccr.mu.Lock() + ccr.serializer.Schedule(f) + ccr.mu.Unlock() } +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { + errCh := make(chan error, 1) + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ok := ccr.serializer.Schedule(func(context.Context) { + ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + errCh <- balancer.ErrBadResolverState + return + } + errCh <- nil + }) + if !ok { + // The only time Schedule() fails to add the callback to the + // serializer is when the serializer is closed, and this happens only + // when the resolver wrapper is closed. return nil } - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - return balancer.ErrBadResolverState - } - return nil + return <-errCh } +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) + }) } -// NewAddress is called by the resolver implementation to send addresses to gRPC. +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializerScheduleLocked(func(_ context.Context) { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) + }) } // NewServiceConfig is called by the resolver implementation to send service // configs to gRPC.
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.cc.dopts.disableServiceConfig { - channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) + }) } +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig @@ -172,5 +243,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 934fc1aa..b7723aa0 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -25,7 +25,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "math" "strings" "sync" @@ -76,8 +75,8 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + New: func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } @@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { z.Close() d.pool.Put(z) }() - return ioutil.ReadAll(z) + return io.ReadAll(z) } func (d *gzipDecompressor) Type() string { @@ -160,6 +159,7 @@ type callInfo struct { contentSubtype string codec baseCodec maxRetryRPCBufferSize int + onFinish []func(err error) } func defaultCallInfo() *callInfo { @@ -296,8 +296,44 @@ func (o FailFastCallOption) before(c *callInfo) error { } func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} +// OnFinish returns a CallOption that configures a callback to be called when +// the call completes. The error passed to the callback is the status of the +// RPC, and may be nil. The onFinish callback provided will only be called once +// by gRPC. This is mainly intended for use by streaming interceptors, to be +// notified when the RPC completes along with information about the status of +// the RPC. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func OnFinish(onFinish func(err error)) CallOption { + return OnFinishCallOption{ + OnFinish: onFinish, + } +} + +// OnFinishCallOption is a CallOption that indicates a callback to be called +// when the call completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type OnFinishCallOption struct { + OnFinish func(error) +} + +func (o OnFinishCallOption) before(c *callInfo) error { + c.onFinish = append(c.onFinish, o.OnFinish) + return nil +} + +func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} + // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can receive. +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. func MaxCallRecvMsgSize(bytes int) CallOption { return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } @@ -320,7 +356,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { } func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can send. +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`.
func MaxCallSendMsgSize(bytes int) CallOption { return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } @@ -540,6 +577,9 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -573,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -588,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -655,14 +693,15 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { return hdr, data } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ - Client: client, - Payload: msg, - Data: data, - Length: len(data), - WireLength: len(payload) + headerLen, - SentTime: t, + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + CompressedLength: len(payload), + SentTime: t, } } @@ -683,17 +722,17 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool } type payloadInfo struct { - wireLength int // The compressed length got from wire. + compressedLength int // The compressed length got from wire. uncompressedBytes []byte } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.wireLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -705,13 +744,13 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. 
if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java @@ -719,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. @@ -746,23 +785,25 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. - d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return d, len(d), err } // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + if err := c.Unmarshal(buf, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } @@ -822,19 +863,22 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. 
func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) + return errContextDeadline case context.Canceled: - return status.Error(codes.Canceled, err.Error()) + return errContextCanceled case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index f4dde72b..eeae92fb 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -43,8 +43,8 @@ import ( "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -74,10 +74,10 @@ func init() { srv.drainServerTransports(addr) } internal.AddGlobalServerOptions = func(opt ...ServerOption) { - extraServerOptions = append(extraServerOptions, opt...) + globalServerOptions = append(globalServerOptions, opt...) } internal.ClearGlobalServerOptions = func() { - extraServerOptions = nil + globalServerOptions = nil } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption @@ -86,7 +86,7 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -99,26 +99,20 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. - HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl interface{} + serviceImpl any methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata interface{} -} - -type serverWorkerData struct { - st transport.ServerTransport - wg *sync.WaitGroup - stream *transport.Stream + mdata any } // Server is a gRPC server to serve RPC requests. 
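The rpc_util.go hunk above adds the experimental OnFinish CallOption, invoked once with the RPC's final status. A hedged sketch of attaching it to a unary call; the method path and the untyped req/resp are placeholders:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// callWithFinishHook invokes a unary RPC and logs its final status via the
// OnFinish callback. "/example.Service/Method" is a placeholder method path.
func callWithFinishHook(ctx context.Context, conn *grpc.ClientConn, req, resp any) error {
	return conn.Invoke(ctx, "/example.Service/Method", req, resp,
		grpc.OnFinish(func(err error) {
			// err carries the RPC's final status; nil means success.
			log.Printf("RPC finished: %v", err)
		}))
}
```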
@@ -145,7 +139,7 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannels []chan *serverWorkerData + serverWorkerChannel chan func() } type serverOptions struct { @@ -170,20 +164,24 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool } var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } -var extraServerOptions []ServerOption +var globalServerOptions []ServerOption // A ServerOption sets options such as credentials, codec and keepalive parameters, etc. type ServerOption interface { @@ -233,10 +231,25 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// SharedWriteBuffer allows reusing the per-connection transport write buffer. +// If this option is set to true, every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on the underlying connection. // Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -244,11 +257,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of the read buffer; this determines how +// much data can be read at most for one read syscall. The default value for +// this buffer is 32KB. Zero or negative values will disable the read buffer +// for a connection so the data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -273,9 +285,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -387,6 +399,9 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -552,6 +567,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 2^16 should allow @@ -560,47 +596,40 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { const serverWorkerResetThreshold = 1 << 16 // serverWorkers blocks on a *transport.Stream channel forever and waits for -// data to be fed by serveStreams. This allows different requests to be +// data to be fed by serveStreams. This allows multiple requests to be // processed by the same goroutine, removing the need for expensive stack // re-allocations (see the runtime.morestack problem [1]). // // [1] https://github.com/golang/go/issues/18138 -func (s *Server) serverWorker(ch chan *serverWorkerData) { - // To make sure all server workers don't reset at the same time, choose a - // random number of iterations before resetting. - threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold) - for completed := 0; completed < threshold; completed++ { - data, ok := <-ch +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + f, ok := <-s.serverWorkerChannel if !ok { return } - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) - data.wg.Done() + f() } - go s.serverWorker(ch) + go s.serverWorker() } -// initServerWorkers creates worker goroutines and channels to process incoming +// initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. 
func (s *Server) initServerWorkers() { - s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers) + s.serverWorkerChannel = make(chan func()) for i := uint32(0); i < s.opts.numServerWorkers; i++ { - s.serverWorkerChannels[i] = make(chan *serverWorkerData) - go s.serverWorker(s.serverWorkerChannels[i]) + go s.serverWorker() } } func (s *Server) stopServerWorkers() { - for i := uint32(0); i < s.opts.numServerWorkers; i++ { - close(s.serverWorkerChannels[i]) - } + close(s.serverWorkerChannel) } // NewServer creates a gRPC server which has no service registered and has not // started to accept requests yet. func NewServer(opt ...ServerOption) *Server { opts := defaultServerOptions - for _, o := range extraServerOptions { + for _, o := range globalServerOptions { o.apply(&opts) } for _, o := range opt { @@ -634,7 +663,7 @@ func NewServer(opt ...ServerOption) *Server { // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) } @@ -642,7 +671,7 @@ func (s *Server) printf(format string, a ...interface{}) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } @@ -657,14 +686,14 @@ type ServiceRegistrar interface { // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. - RegisterService(desc *ServiceDesc, impl interface{}) + RegisterService(desc *ServiceDesc, impl any) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -675,7 +704,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -716,7 +745,7 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. 
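Taken together, the server.go hunks above expose three tuning knobs: a fixed pool of stream-handling goroutines fed by a single channel, the experimental shared receive-buffer pool consumed by the parser, and MaxConcurrentStreams, which now maps 0 to math.MaxUint32. A sketch of a server wiring them up, with illustrative values:

```go
package main

import (
	"runtime"

	"google.golang.org/grpc"
)

func newTunedServer() *grpc.Server {
	return grpc.NewServer(
		// Handle streams on a fixed pool of goroutines to limit stack churn.
		grpc.NumStreamWorkers(uint32(runtime.NumCPU())),
		// Experimental: reuse receive buffers via the shared pool added above.
		grpc.RecvBufferPool(grpc.NewSharedBufferPool()),
		// Cap concurrent streams per transport; 0 would now mean "unlimited".
		grpc.MaxConcurrentStreams(1024),
	)
}
```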
@@ -897,7 +926,7 @@ func (s *Server) drainServerTransports(addr string) { s.mu.Lock() conns := s.conns[addr] for st := range conns { - st.Drain() + st.Drain("") } s.mu.Unlock() } @@ -917,6 +946,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -942,29 +972,29 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup - var roundRobinCounter uint32 + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) + + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + } + if s.opts.numServerWorkers > 0 { - data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data: + case s.serverWorkerChannel <- f: + return default: // If all stream workers are busy, fallback to the default code path. - go func() { - s.handleStream(st, stream, s.traceInfo(st, stream)) - wg.Done() - }() } - } else { - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() } + go f() }, func(ctx context.Context, method string) context.Context { if !EnableTracing { return ctx @@ -1008,7 +1038,8 @@ var _ http.Handler = (*Server)(nil) func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. return } if !s.addConn(listenerAddressForServeHTTP, st) { @@ -1046,13 +1077,13 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { // Transport added after we drained our existing conns: drain it // immediately. 
- st.Drain() + st.Drain("") } if s.conns[addr] == nil { @@ -1102,7 +1133,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1149,22 +1180,17 @@ func chainUnaryServerInterceptors(s *Server) { } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next UnaryHandler - } - state.next = func(ctx context.Context, req interface{}) (interface{}, error) { - if state.i == len(interceptors)-1 { - return interceptors[state.i](ctx, req, info, handler) - } - state.i++ - return interceptors[state.i-1](ctx, req, info, state.next) - } - return state.next(ctx, req) + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req any) (any, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1200,7 +1226,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() @@ -1256,7 +1282,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. logEntry.PeerAddr = peer.Addr } for _, binlog := range binlogs { - binlog.Log(logEntry) + binlog.Log(ctx, logEntry) } } @@ -1267,6 +1293,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. var comp, decomp encoding.Compressor var cp Compressor var dc Decompressor + var sendCompressorName string // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. @@ -1287,12 +1314,18 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { cp = s.opts.cp - stream.SetSendCompress(cp.Type()) + sendCompressorName = cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. 
comp = encoding.GetCompressor(rc) if comp != nil { - stream.SetSendCompress(rc) + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } @@ -1300,27 +1333,28 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { sh.HandleRPC(stream.Context(), &stats.InPayload{ - RecvTime: time.Now(), - Payload: v, - WireLength: payInfo.wireLength + headerLen, - Data: d, - Length: len(d), + RecvTime: time.Now(), + Payload: v, + Length: len(d), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Data: d, }) } if len(binlogs) != 0 { @@ -1328,7 +1362,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(cm) + binlog.Log(stream.Context(), cm) } } if trInfo != nil { @@ -1361,7 +1395,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(sh) + binlog.Log(stream.Context(), sh) } } st := &binarylog.ServerTrailer{ @@ -1369,7 +1403,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } return appErr @@ -1379,6 +1413,11 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } opts := &transport.Options{Last: true} + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). @@ -1406,8 +1445,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(sh) - binlog.Log(st) + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), st) } } return err @@ -1421,8 +1460,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Message: reply, } for _, binlog := range binlogs { - binlog.Log(sh) - binlog.Log(sm) + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), sm) } } if channelz.IsOn() { @@ -1434,17 +1473,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // TODO: Should we be logging if writing status failed here, like above? // Should the logging be in WriteStatus? Should we ignore the WriteStatus // error or allow the stats handler to see it? - err = t.WriteStatus(stream, statusOK) if len(binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: stream.Trailer(), Err: appErr, } for _, binlog := range binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } - return err + return t.WriteStatus(stream, statusOK) } // chainStreamServerInterceptors chains all stream server interceptors into one. @@ -1469,22 +1507,17 @@ func chainStreamServerInterceptors(s *Server) { } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next StreamHandler - } - state.next = func(srv interface{}, ss ServerStream) error { - if state.i == len(interceptors)-1 { - return interceptors[state.i](srv, ss, info, handler) - } - state.i++ - return interceptors[state.i-1](srv, ss, info, state.next) - } - return state.next(srv, ss) + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv any, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1510,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1524,7 +1557,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1583,7 +1616,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(logEntry) + binlog.Log(stream.Context(), logEntry) } } @@ -1606,12 +1639,18 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. 
if s.opts.cp != nil { ss.cp = s.opts.cp - stream.SetSendCompress(s.opts.cp.Type()) + ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. ss.comp = encoding.GetCompressor(rc) if ss.comp != nil { - stream.SetSendCompress(rc) + ss.sendCompressorName = rc + } + } + + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) } } @@ -1621,7 +1660,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server interface{} + var server any if info != nil { server = info.serviceImpl } @@ -1649,16 +1688,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.SetError() ss.mu.Unlock() } - t.WriteStatus(ss.s, appStatus) if len(ss.binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } + t.WriteStatus(ss.s, appStatus) // TODO: Should we log an error from WriteStatus here and below? return appErr } @@ -1667,17 +1706,16 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.trInfo.tr.LazyLog(stringer("OK"), false) ss.mu.Unlock() } - err = t.WriteStatus(ss.s, statusOK) if len(ss.binlogs) != 0 { st := &binarylog.ServerTrailer{ Trailer: ss.s.Trailer(), Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(st) + binlog.Log(stream.Context(), st) } } - return err + return t.WriteStatus(ss.s, statusOK) } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { @@ -1688,13 +1726,13 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str pos := strings.LastIndex(sm, "/") if pos == -1 { if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) trInfo.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) @@ -1735,7 +1773,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) @@ -1819,7 +1857,7 @@ func (s *Server) Stop() { } for _, cs := range conns { for st := range cs { - st.Close() + st.Close(errors.New("Server.Stop called")) } } if s.opts.numServerWorkers > 0 { @@ -1855,7 +1893,7 @@ func (s *Server) GracefulStop() { if !s.drain { for _, conns := range s.conns { for st := range conns { - st.Drain() + st.Drain("graceful_stop") } } s.drain = true @@ -1944,6 +1982,60 @@ func 
SendHeader(ctx context.Context, md metadata.MD) error { return nil } +// SetSendCompressor sets a compressor for outbound messages from the server. +// It must not be called after any event that causes headers to be sent +// (see ServerStream.SetHeader for the complete list). Provided compressor is +// used when below conditions are met: +// +// - compressor is registered via encoding.RegisterCompressor +// - compressor name must exist in the client advertised compressor names +// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to +// get client supported compressor names. +// +// The context provided must be the context passed to the server's handler. +// It must be noted that compressor name encoding.Identity disables the +// outbound compression. +// By default, server messages will be sent using the same compressor with +// which request messages were sent. +// +// It is not safe to call SetSendCompressor concurrently with SendHeader and +// SendMsg. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func SetSendCompressor(ctx context.Context, name string) error { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return fmt.Errorf("failed to fetch the stream from the given context") + } + + if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { + return fmt.Errorf("unable to set send compressor: %w", err) + } + + return stream.SetSendCompress(name) +} + +// ClientSupportedCompressors returns compressor names advertised by the client +// via grpc-accept-encoding header. +// +// The context provided must be the context passed to the server's handler. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil +} + // SetTrailer sets the trailer metadata that will be sent when an RPC returns. // When called more than once, all the provided metadata will be merged. // @@ -1978,3 +2070,53 @@ type channelzServer struct { func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { return c.s.channelzMetric() } + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. +func validateSendCompressor(name, clientCompressors string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range strings.Split(clientCompressors, ",") { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n atomic.Int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. 
the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 01bbb202..0df11fc0 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -23,8 +23,6 @@ import ( "errors" "fmt" "reflect" - "strconv" - "strings" "time" "google.golang.org/grpc/codes" @@ -106,8 +104,8 @@ type healthCheckConfig struct { type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -129,50 +127,6 @@ type retryThrottlingPolicy struct { TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. - hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { Service string Method string @@ -201,7 +155,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -226,7 +180,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -252,18 +206,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } if 
mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -283,13 +232,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} @@ -312,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol if jrp == nil { return nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -332,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: *ib, - MaxBackoff: *mb, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 00000000..48a64cfe --- /dev/null +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. 
+ Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. +type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool that just makes a new buffer without pooling. +type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 0285dcc6..4ab70e2d 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -59,18 +59,36 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side.
Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int + // RecvTime is the time when the payload is received. RecvTime time.Time } @@ -126,12 +144,18 @@ type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte - // Length is the length of uncompressed data. + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). Length int - // WireLength is the length of data on wire (compressed, signed, encrypted). + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. WireLength int // SentTime is the time when the payload is sent. SentTime time.Time diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 623be39f..a93360ef 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -77,9 +77,18 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // // - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. +// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. // // - Otherwise, err is an error not compatible with this package. 
In this // case, a Status is returned with codes.Unknown and err's Error() message, @@ -88,10 +97,31 @@ func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus(), true + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } + return grpcStatus, true + } + var gs grpcstatus + if errors.As(err, &gs) { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } + p := grpcStatus.Proto() + p.Message = err.Error() + return status.FromProto(p), true } return New(codes.Unknown, err.Error()), false } @@ -103,19 +133,16 @@ func Convert(err error) *Status { return s } -// Code returns the Code of the error if it is a Status error, codes.OK if err -// is nil, or codes.Unknown otherwise. +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. func Code(err error) codes.Code { // Don't use FromError to avoid allocation of OK status. if err == nil { return codes.OK } - if se, ok := err.(interface { - GRPCStatus() *Status - }); ok { - return se.GRPCStatus().Code() - } - return codes.Unknown + + return Convert(err).Code() } // FromContextError converts a context error or wrapped context error into a diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 960c3e33..b14b2fbe 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -54,7 +55,7 @@ import ( // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error +type StreamHandler func(srv any, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -79,9 +80,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -90,7 +91,9 @@ type Stream interface { // status package. 
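The status.FromError rewrite above is one of the user-visible parts of this bump: errors that wrap a status error (fmt.Errorf with %w) are now unwrapped via errors.As, and the returned Status carries the wrapped error's code with the message widened to the full err.Error() text. A small usage sketch against the public API:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	base := status.Error(codes.NotFound, "user not found")
	wrapped := fmt.Errorf("lookup failed: %w", base)

	// As of this version FromError sees through the wrapping: ok is true,
	// the code is NotFound, and the message is the entire wrapped text
	// ("lookup failed: rpc error: code = NotFound desc = user not found").
	s, ok := status.FromError(wrapped)
	fmt.Println(ok, s.Code(), s.Message())

	// status.Code now delegates to Convert, so it also unwraps.
	fmt.Println(status.Code(wrapped)) // NotFound
}
```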
type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -123,7 +126,10 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -132,7 +138,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. This is typically @@ -168,10 +174,29 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // validate md if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } } if channelz.IsOn() { cc.incrCallsStarted() @@ -352,7 +377,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client } } for _, binlog := range cs.binlogs { - binlog.Log(logEntry) + binlog.Log(cs.ctx, logEntry) } } @@ -416,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. 
ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -438,7 +463,7 @@ func (a *csAttempt) getTransport() error { cs := a.cs var err error - a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -455,6 +480,25 @@ func (a *csAttempt) getTransport() error { func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries + + // Merge metadata stored in PickResult, if any, with existing call metadata. + // It is safe to overwrite the csAttempt's context here, since all state + // maintained in it is local to the attempt. When the attempt has to be + // retried, a new instance of csAttempt will be created. + if a.pickResult.Metadata != nil { + // We currently do not have a function in the metadata package which + // merges given metadata with existing metadata in a context. Existing + // function `AppendToOutgoingContext()` takes a variadic argument of key + // value pairs. + // + // TODO: Make it possible to retrieve key value pairs from metadata.MD + // in a form passable to AppendToOutgoingContext(), or create a version + // of AppendToOutgoingContext() that accepts a metadata.MD. + md, _ := metadata.FromOutgoingContext(a.ctx) + md = metadata.Join(md, a.pickResult.Metadata) + a.ctx = metadata.NewOutgoingContext(a.ctx, md) + } + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { nse, ok := err.(*transport.NewStreamError) @@ -471,7 +515,7 @@ func (a *csAttempt) newStream() error { return toRPCErr(nse.Err) } a.s = s - a.p = &parser{r: s} + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} return nil } @@ -529,12 +573,12 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - done func(balancer.DoneInfo) + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult finished bool dc Decompressor @@ -752,23 +796,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD - noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() - if err == transport.ErrNoHeaders { - noHeader = true - return nil - } return toRPCErr(err) }, cs.commitAttemptLocked) + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + if err != nil { cs.finish(err) + // Do not return the error. The user should get it by calling Recv(). + return nil, nil } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { // Only log if binary log is on and header has not been logged, and // there are actually headers to log.
logEntry := &binarylog.ServerHeader{ @@ -781,9 +826,10 @@ func (cs *clientStream) Header() (metadata.MD, error) { } cs.serverHeaderBinlogged = true for _, binlog := range cs.binlogs { - binlog.Log(logEntry) + binlog.Log(cs.ctx, logEntry) } } + return m, nil } @@ -824,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -862,13 +908,13 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { Message: data, } for _, binlog := range cs.binlogs { - binlog.Log(cm) + binlog.Log(cs.ctx, cm) } } return err } -func (cs *clientStream) RecvMsg(m interface{}) error { +func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() @@ -886,30 +932,12 @@ func (cs *clientStream) RecvMsg(m interface{}) error { Message: recvInfo.uncompressedBytes, } for _, binlog := range cs.binlogs { - binlog.Log(sm) + binlog.Log(cs.ctx, sm) } } if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if len(cs.binlogs) != 0 { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(logEntry) - } - } } return err } @@ -934,7 +962,7 @@ func (cs *clientStream) CloseSend() error { OnClientSide: true, } for _, binlog := range cs.binlogs { - binlog.Log(chc) + binlog.Log(cs.ctx, chc) } } // We never returned an error here for reasons. @@ -952,6 +980,9 @@ func (cs *clientStream) finish(err error) { return } cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } cs.commitAttemptLocked() if cs.attempt != nil { cs.attempt.finish(err) @@ -962,18 +993,30 @@ func (cs *clientStream) finish(err error) { } } } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. - if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(c) + // Only one of cancel or trailer needs to be logged. 
+ if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } if err == nil { @@ -989,7 +1032,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1016,7 +1059,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} @@ -1062,9 +1105,10 @@ func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Length: len(payInfo.uncompressedBytes), }) } if channelz.IsOn() { @@ -1103,12 +1147,12 @@ func (a *csAttempt) finish(err error) { tr = a.s.Trailer() } - if a.done != nil { + if a.pickResult.Done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } - a.done(balancer.DoneInfo{ + a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, @@ -1230,17 +1274,22 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, err } as.s = s - as.p = &parser{r: s} + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { - // Listen on cc and stream contexts to cleanup when the user closes the - // ClientConn or cancels the stream context. In all other cases, an error - // should already be injected into the recv buffer by the transport, which - // the client will eventually receive, and then we will cancel the stream's - // context in clientStream.finish. + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. 
go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() select { - case <-ac.ctx.Done(): + case <-acCtx.Done(): as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) case <-ctx.Done(): as.finish(toRPCErr(ctx.Err())) @@ -1303,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1348,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. @@ -1464,7 +1513,10 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. - SendMsg(m interface{}) error + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1473,7 +1525,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. @@ -1489,6 +1541,8 @@ type serverStream struct { comp encoding.Compressor decomp encoding.Compressor + sendCompressorName string + maxReceiveMessageSize int maxSendMessageSize int trInfo *traceInfo @@ -1536,7 +1590,7 @@ func (ss *serverStream) SendHeader(md metadata.MD) error { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(sh) + binlog.Log(ss.ctx, sh) } } return err @@ -1552,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1560,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1581,6 +1635,13 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } }() + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. 
+ if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + // load hdr, payload, data hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) if err != nil { @@ -1602,14 +1663,14 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { } ss.serverHeaderBinlogged = true for _, binlog := range ss.binlogs { - binlog.Log(sh) + binlog.Log(ss.ctx, sh) } } sm := &binarylog.ServerMessage{ Message: data, } for _, binlog := range ss.binlogs { - binlog.Log(sm) + binlog.Log(ss.ctx, sm) } } if len(ss.statsHandler) != 0 { @@ -1620,7 +1681,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1628,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1657,7 +1718,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} for _, binlog := range ss.binlogs { - binlog.Log(chc) + binlog.Log(ss.ctx, chc) } } return err @@ -1673,9 +1734,10 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { RecvTime: time.Now(), Payload: m, // TODO truncate large payload. - Data: payInfo.uncompressedBytes, - WireLength: payInfo.wireLength + headerLen, - Length: len(payInfo.uncompressedBytes), + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, }) } } @@ -1684,7 +1746,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { Message: payInfo.uncompressedBytes, } for _, binlog := range ss.binlogs { - binlog.Log(cm) + binlog.Log(ss.ctx, cm) } } return nil @@ -1699,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 07a2d26b..9ded7932 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -97,8 +97,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? 
} @@ -111,7 +111,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 2198e709..724ad210 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.51.0" +const Version = "1.58.3" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bd8e0cdb..bbc9e2e3 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -41,16 +41,8 @@ if [[ "$1" = "-install" ]]; then github.com/client9/misspell/cmd/misspell popd if [[ -z "${VET_SKIP_PROTO}" ]]; then - if [[ "${TRAVIS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 - PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip - pushd /home/travis - wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} - unzip ${PROTOC_FILENAME} - bin/protoc --version - popd - elif [[ "${GITHUB_ACTIONS}" = "true" ]]; then - PROTOBUF_VERSION=3.14.0 + if [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip pushd /home/runner/go wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} @@ -66,6 +58,16 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + make proto && git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then + exit 0 +fi + # - Ensure all source files contain a copyright message. # (Done in two parts because Darwin "git grep" has broken support for compound # exclusion matches.) @@ -82,6 +84,9 @@ not git grep -l 'x/net/context' -- "*.go" # thread safety. git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +# - Do not use "interface{}"; use "any" instead. +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' + # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -93,13 +98,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*. misspell -error . -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - # - gofmt, goimports, golint (with exceptions for generated code), go vet, # go mod tidy. # Perform these checks on each module inside gRPC. @@ -111,7 +109,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do goimports -l . 2>&1 | not grep -vE "\.pb\.go" golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy + go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd @@ -121,8 +119,9 @@ done # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. 
+# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. @@ -172,8 +171,6 @@ proto.RegisteredExtension is deprecated proto.RegisteredExtensions is deprecated proto.RegisterMapType is deprecated proto.Unmarshaler is deprecated -resolver.Backend -resolver.GRPCLB Target is deprecated: Use the Target field in the BuildOptions instead. xxx_messageInfo_ ' "${SC_OUT}" diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go index 00ea2fec..21d5d2cb 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -4,7 +4,7 @@ // Package protojson marshals and unmarshals protocol buffer messages as JSON // format. It follows the guide at -// https://developers.google.com/protocol-buffers/docs/proto3#json. +// https://protobuf.dev/programming-guides/proto3#json. // // This package produces a different output than the standard "encoding/json" // package, which does not operate correctly on protocol buffer messages. diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go index d09d22e1..66b95870 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -106,13 +106,19 @@ func (o MarshalOptions) Format(m proto.Message) string { // MarshalOptions. Do not depend on the output being stable. It may change over // time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the JSON format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. // For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. -func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { if o.Multiline && o.Indent == "" { o.Indent = defaultIndent } @@ -120,7 +126,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := json.NewEncoder(o.Indent) + internalEnc, err := json.NewEncoder(b, o.Indent) if err != nil { return nil, err } @@ -128,7 +134,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { // Treat nil message interface as an empty message, // in which case the output is an empty JSON object.
if m == nil { - return []byte("{}"), nil + return append(b, '{', '}'), nil } enc := encoder{internalEnc, o} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go index c85f8469..6c37d417 100644 --- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -814,16 +814,22 @@ func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { return d.unexpectedTokenError(tok) } - t, err := time.Parse(time.RFC3339Nano, tok.ParsedString()) + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) if err != nil { return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) } - // Validate seconds. No need to validate nanos because time.Parse would have - // covered that already. + // Validate seconds. secs := t.Unix() if secs < minTimestampSeconds || secs > maxTimestampSeconds { return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) } + // Validate subseconds. + i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } fds := m.Descriptor().Fields() fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go index ebf6c652..722a7b41 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -101,13 +101,19 @@ func (o MarshalOptions) Format(m proto.Message) string { // MarshalOptions object. Do not depend on the output being stable. It may // change over time across different versions of the program. func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { - return o.marshal(m) + return o.marshal(nil, m) +} + +// MarshalAppend appends the textproto format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) } // marshal is a centralized function that all marshal operations go through. // For profiling purposes, avoid changing the name of this function or // introducing other code paths for marshal that do not go through this. -func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { var delims = [2]byte{'{', '}'} if o.Multiline && o.Indent == "" { @@ -117,7 +123,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { o.Resolver = protoregistry.GlobalTypes } - internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) + internalEnc, err := text.NewEncoder(b, o.Indent, delims, o.EmitASCII) if err != nil { return nil, err } @@ -125,7 +131,7 @@ func (o MarshalOptions) marshal(m proto.Message) ([]byte, error) { // Treat nil message interface as an empty message, // in which case there is nothing to output. 
if m == nil { - return []byte{}, nil + return b, nil } enc := encoder{internalEnc, o} diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index ce57f57e..f4b4686c 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package protowire parses and formats the raw wire encoding. -// See https://developers.google.com/protocol-buffers/docs/encoding. +// See https://protobuf.dev/programming-guides/encoding. // // For marshaling and unmarshaling entire protobuf messages, // use the "google.golang.org/protobuf/proto" package instead. @@ -29,12 +29,8 @@ const ( ) // IsValid reports whether the field number is semantically valid. -// -// Note that while numbers within the reserved range are semantically invalid, -// they are syntactically valid in the wire format. -// Implementations may treat records with reserved field numbers as unknown. func (n Number) IsValid() bool { - return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber + return MinValidNumber <= n && n <= MaxValidNumber } // Type represents the wire type. diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go index b13fd29e..d043a6eb 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -294,7 +294,7 @@ func (d *Decoder) isValueNext() bool { } // consumeToken constructs a Token for given Kind with raw value derived from -// current d.in and given size, and consumes the given size-lenght of it. +// current d.in and given size, and consumes the given size-length of it. func (d *Decoder) consumeToken(kind Kind, size int) Token { tok := Token{ kind: kind, diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go index fbdf3487..934f2dcb 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -41,8 +41,10 @@ type Encoder struct { // // If indent is a non-empty string, it causes every entry for an Array or Object // to be preceded by the indent and trailed by a newline. -func NewEncoder(indent string) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space or tab characters") @@ -176,13 +178,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte { // WriteInt writes out the given signed integer in JSON number value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) + e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer in JSON number value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) + e.out = strconv.AppendUint(e.out, n, 10) } // StartObject writes out the '{' symbol. 
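MarshalAppend is new in both protojson and prototext above: it encodes into a caller-supplied buffer, which json.NewEncoder and text.NewEncoder now accept as their first argument. A minimal sketch of amortizing allocations by reusing one buffer across messages (wrapperspb is used here only to keep the example self-contained):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	opts := protojson.MarshalOptions{}
	msgs := []*wrapperspb.StringValue{
		wrapperspb.String("first"),
		wrapperspb.String("second"),
	}

	buf := make([]byte, 0, 256) // backing array reused across iterations
	for _, m := range msgs {
		buf = buf[:0] // reset length, keep capacity
		var err error
		buf, err = opts.MarshalAppend(buf, m)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(buf)) // "first", then "second"
	}
}
```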
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
index 427c62d0..87853e78 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -412,12 +412,13 @@ func (d *Decoder) parseFieldName() (tok Token, err error) {
 	// Field number. Identify if input is a valid number that is not negative
 	// and is decimal integer within 32-bit range.
 	if num := parseNumber(d.in); num.size > 0 {
+		str := num.string(d.in)
 		if !num.neg && num.kind == numDec {
-			if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil {
+			if _, err := strconv.ParseInt(str, 10, 32); err == nil {
 				return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil
 			}
 		}
-		return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size])
+		return Token{}, d.newSyntaxError("invalid field number: %s", str)
 	}

 	return Token{}, d.newSyntaxError("invalid field name: %s", errId(d.in))
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
index 81a5d8c8..45c81f02 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go
@@ -15,17 +15,12 @@ func (d *Decoder) parseNumberValue() (Token, bool) {
 	if num.neg {
 		numAttrs |= isNegative
 	}
-	strSize := num.size
-	last := num.size - 1
-	if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') {
-		strSize = last
-	}
 	tok := Token{
 		kind:     Scalar,
 		attrs:    numberValue,
 		pos:      len(d.orig) - len(d.in),
 		raw:      d.in[:num.size],
-		str:      string(d.in[:strSize]),
+		str:      num.string(d.in),
 		numAttrs: numAttrs,
 	}
 	d.consume(num.size)
@@ -46,6 +41,27 @@ type number struct {
 	kind uint8
 	neg  bool
 	size int
+	// if neg, this is the length of whitespace and comments between
+	// the minus sign and the rest of the number literal
+	sep int
+}
+
+func (num number) string(data []byte) string {
+	strSize := num.size
+	last := num.size - 1
+	if num.kind == numFloat && (data[last] == 'f' || data[last] == 'F') {
+		strSize = last
+	}
+	if num.neg && num.sep > 0 {
+		// strip whitespace/comments between negative sign and the rest
+		strLen := strSize - num.sep
+		str := make([]byte, strLen)
+		str[0] = data[0]
+		copy(str[1:], data[num.sep+1:strSize])
+		return string(str)
+	}
+	return string(data[:strSize])
+
 }

 // parseNumber constructs a number object from given input. It allows for the
@@ -67,19 +83,22 @@ func parseNumber(input []byte) number {
 	}

 	// Optional -
+	var sep int
 	if s[0] == '-' {
 		neg = true
 		s = s[1:]
 		size++
+		// Consume any whitespace or comments between the
+		// negative sign and the rest of the number
+		lenBefore := len(s)
+		s = consume(s, 0)
+		sep = lenBefore - len(s)
+		size += sep
 		if len(s) == 0 {
 			return number{}
 		}
 	}
-
-	// C++ allows for whitespace and comments in between the negative sign and
-	// the rest of the number. This logic currently does not but is consistent
-	// with v1.
- switch { case s[0] == '0': if len(s) > 1 { @@ -116,7 +135,7 @@ func parseNumber(input []byte) number { if len(s) > 0 && !isDelim(s[0]) { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } } s = s[1:] @@ -188,5 +207,5 @@ func parseNumber(input []byte) number { return number{} } - return number{kind: kind, neg: neg, size: size} + return number{kind: kind, neg: neg, size: size, sep: sep} } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go index da289ccc..cf7aed77 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -53,8 +53,10 @@ type encoderState struct { // If outputASCII is true, strings will be serialized in such a way that // multi-byte UTF-8 sequences are escaped. This property ensures that the // overall output is ASCII (as opposed to UTF-8). -func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { - e := &Encoder{} +func NewEncoder(buf []byte, indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{ + encoderState: encoderState{out: buf}, + } if len(indent) > 0 { if strings.Trim(indent, " \t") != "" { return nil, errors.New("indent may only be composed of space and tab characters") @@ -195,13 +197,13 @@ func appendFloat(out []byte, n float64, bitSize int) []byte { // WriteInt writes out the given signed integer value. func (e *Encoder) WriteInt(n int64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatInt(n, 10)...) + e.out = strconv.AppendInt(e.out, n, 10) } // WriteUint writes out the given unsigned integer value. func (e *Encoder) WriteUint(n uint64) { e.prepareNext(scalar) - e.out = append(e.out, strconv.FormatUint(n, 10)...) + e.out = strconv.AppendUint(e.out, n, 10) } // WriteLiteral writes out the given string as a literal value without quotes. diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index e3cdf1c2..136f1b21 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -50,6 +50,7 @@ const ( FileDescriptorProto_Options_field_name protoreflect.Name = "options" FileDescriptorProto_SourceCodeInfo_field_name protoreflect.Name = "source_code_info" FileDescriptorProto_Syntax_field_name protoreflect.Name = "syntax" + FileDescriptorProto_Edition_field_name protoreflect.Name = "edition" FileDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.name" FileDescriptorProto_Package_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.package" @@ -63,6 +64,7 @@ const ( FileDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.options" FileDescriptorProto_SourceCodeInfo_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.source_code_info" FileDescriptorProto_Syntax_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.syntax" + FileDescriptorProto_Edition_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.edition" ) // Field numbers for google.protobuf.FileDescriptorProto. 
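The decode_number.go change above records, in number.sep, the length of any whitespace or comments between a minus sign and the digits, and number.string strips it back out, so the text parser now accepts input the C++ parser already allowed. A hedged sketch of the observable effect, assuming this vendored version; the Duration message is illustrative only:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// The space between '-' and '30' was previously a syntax error.
	msg := &durationpb.Duration{}
	if err := prototext.Unmarshal([]byte(`seconds: - 30`), msg); err != nil {
		panic(err)
	}
	fmt.Println(msg.GetSeconds()) // -30
}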
@@ -79,6 +81,7 @@ const ( FileDescriptorProto_Options_field_number protoreflect.FieldNumber = 8 FileDescriptorProto_SourceCodeInfo_field_number protoreflect.FieldNumber = 9 FileDescriptorProto_Syntax_field_number protoreflect.FieldNumber = 12 + FileDescriptorProto_Edition_field_number protoreflect.FieldNumber = 13 ) // Names for google.protobuf.DescriptorProto. @@ -180,13 +183,58 @@ const ( // Field names for google.protobuf.ExtensionRangeOptions. const ( ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + ExtensionRangeOptions_Declaration_field_name protoreflect.Name = "declaration" + ExtensionRangeOptions_Verification_field_name protoreflect.Name = "verification" ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option" + ExtensionRangeOptions_Declaration_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration" + ExtensionRangeOptions_Verification_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification" ) // Field numbers for google.protobuf.ExtensionRangeOptions. const ( ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + ExtensionRangeOptions_Declaration_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Verification_field_number protoreflect.FieldNumber = 3 +) + +// Full and short names for google.protobuf.ExtensionRangeOptions.VerificationState. +const ( + ExtensionRangeOptions_VerificationState_enum_fullname = "google.protobuf.ExtensionRangeOptions.VerificationState" + ExtensionRangeOptions_VerificationState_enum_name = "VerificationState" +) + +// Names for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_message_name protoreflect.Name = "Declaration" + ExtensionRangeOptions_Declaration_message_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration" +) + +// Field names for google.protobuf.ExtensionRangeOptions.Declaration. 
+const ( + ExtensionRangeOptions_Declaration_Number_field_name protoreflect.Name = "number" + ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name" + ExtensionRangeOptions_Declaration_Type_field_name protoreflect.Name = "type" + ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved" + ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated" + + ExtensionRangeOptions_Declaration_Number_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number" + ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name" + ExtensionRangeOptions_Declaration_Type_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type" + ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated" + ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved" + ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated" +) + +// Field numbers for google.protobuf.ExtensionRangeOptions.Declaration. +const ( + ExtensionRangeOptions_Declaration_Number_field_number protoreflect.FieldNumber = 1 + ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2 + ExtensionRangeOptions_Declaration_Type_field_number protoreflect.FieldNumber = 3 + ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4 + ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5 + ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.FieldDescriptorProto. @@ -494,26 +542,29 @@ const ( // Field names for google.protobuf.MessageOptions. 
const ( - MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" - MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" - MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_name protoreflect.Name = "message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_name protoreflect.Name = "no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_name protoreflect.Name = "deprecated" + MessageOptions_MapEntry_field_name protoreflect.Name = "map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + MessageOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" - MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" - MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" - MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" - MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" + MessageOptions_MessageSetWireFormat_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format" + MessageOptions_NoStandardDescriptorAccessor_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.no_standard_descriptor_accessor" + MessageOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated" + MessageOptions_MapEntry_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.map_entry" + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts" + MessageOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option" ) // Field numbers for google.protobuf.MessageOptions. const ( - MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 - MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 - MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 - MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + MessageOptions_MessageSetWireFormat_field_number protoreflect.FieldNumber = 1 + MessageOptions_NoStandardDescriptorAccessor_field_number protoreflect.FieldNumber = 2 + MessageOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + MessageOptions_MapEntry_field_number protoreflect.FieldNumber = 7 + MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11 + MessageOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.FieldOptions. 
@@ -528,16 +579,26 @@ const ( FieldOptions_Packed_field_name protoreflect.Name = "packed" FieldOptions_Jstype_field_name protoreflect.Name = "jstype" FieldOptions_Lazy_field_name protoreflect.Name = "lazy" + FieldOptions_UnverifiedLazy_field_name protoreflect.Name = "unverified_lazy" FieldOptions_Deprecated_field_name protoreflect.Name = "deprecated" FieldOptions_Weak_field_name protoreflect.Name = "weak" + FieldOptions_DebugRedact_field_name protoreflect.Name = "debug_redact" + FieldOptions_Retention_field_name protoreflect.Name = "retention" + FieldOptions_Target_field_name protoreflect.Name = "target" + FieldOptions_Targets_field_name protoreflect.Name = "targets" FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype" FieldOptions_Packed_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.packed" FieldOptions_Jstype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.jstype" FieldOptions_Lazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.lazy" + FieldOptions_UnverifiedLazy_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.unverified_lazy" FieldOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.deprecated" FieldOptions_Weak_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.weak" + FieldOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact" + FieldOptions_Retention_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.retention" + FieldOptions_Target_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.target" + FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets" FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option" ) @@ -547,8 +608,13 @@ const ( FieldOptions_Packed_field_number protoreflect.FieldNumber = 2 FieldOptions_Jstype_field_number protoreflect.FieldNumber = 6 FieldOptions_Lazy_field_number protoreflect.FieldNumber = 5 + FieldOptions_UnverifiedLazy_field_number protoreflect.FieldNumber = 15 FieldOptions_Deprecated_field_number protoreflect.FieldNumber = 3 FieldOptions_Weak_field_number protoreflect.FieldNumber = 10 + FieldOptions_DebugRedact_field_number protoreflect.FieldNumber = 16 + FieldOptions_Retention_field_number protoreflect.FieldNumber = 17 + FieldOptions_Target_field_number protoreflect.FieldNumber = 18 + FieldOptions_Targets_field_number protoreflect.FieldNumber = 19 FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) @@ -564,6 +630,18 @@ const ( FieldOptions_JSType_enum_name = "JSType" ) +// Full and short names for google.protobuf.FieldOptions.OptionRetention. +const ( + FieldOptions_OptionRetention_enum_fullname = "google.protobuf.FieldOptions.OptionRetention" + FieldOptions_OptionRetention_enum_name = "OptionRetention" +) + +// Full and short names for google.protobuf.FieldOptions.OptionTargetType. +const ( + FieldOptions_OptionTargetType_enum_fullname = "google.protobuf.FieldOptions.OptionTargetType" + FieldOptions_OptionTargetType_enum_name = "OptionTargetType" +) + // Names for google.protobuf.OneofOptions. const ( OneofOptions_message_name protoreflect.Name = "OneofOptions" @@ -590,20 +668,23 @@ const ( // Field names for google.protobuf.EnumOptions. 
const ( - EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" - EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" - EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" + EnumOptions_AllowAlias_field_name protoreflect.Name = "allow_alias" + EnumOptions_Deprecated_field_name protoreflect.Name = "deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts" + EnumOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option" - EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" - EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" - EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" + EnumOptions_AllowAlias_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias" + EnumOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated" + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts" + EnumOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option" ) // Field numbers for google.protobuf.EnumOptions. const ( - EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 - EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 - EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 + EnumOptions_AllowAlias_field_number protoreflect.FieldNumber = 2 + EnumOptions_Deprecated_field_number protoreflect.FieldNumber = 3 + EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6 + EnumOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999 ) // Names for google.protobuf.EnumValueOptions. @@ -813,11 +894,13 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_name protoreflect.Name = "source_file" GeneratedCodeInfo_Annotation_Begin_field_name protoreflect.Name = "begin" GeneratedCodeInfo_Annotation_End_field_name protoreflect.Name = "end" + GeneratedCodeInfo_Annotation_Semantic_field_name protoreflect.Name = "semantic" GeneratedCodeInfo_Annotation_Path_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.path" GeneratedCodeInfo_Annotation_SourceFile_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.source_file" GeneratedCodeInfo_Annotation_Begin_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.begin" GeneratedCodeInfo_Annotation_End_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.end" + GeneratedCodeInfo_Annotation_Semantic_field_fullname protoreflect.FullName = "google.protobuf.GeneratedCodeInfo.Annotation.semantic" ) // Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. @@ -826,4 +909,11 @@ const ( GeneratedCodeInfo_Annotation_SourceFile_field_number protoreflect.FieldNumber = 2 GeneratedCodeInfo_Annotation_Begin_field_number protoreflect.FieldNumber = 3 GeneratedCodeInfo_Annotation_End_field_number protoreflect.FieldNumber = 4 + GeneratedCodeInfo_Annotation_Semantic_field_number protoreflect.FieldNumber = 5 +) + +// Full and short names for google.protobuf.GeneratedCodeInfo.Annotation.Semantic. 
+const ( + GeneratedCodeInfo_Annotation_Semantic_enum_fullname = "google.protobuf.GeneratedCodeInfo.Annotation.Semantic" + GeneratedCodeInfo_Annotation_Semantic_enum_name = "Semantic" ) diff --git a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go index 3bc71013..e0f75fea 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/type_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/type_gen.go @@ -32,6 +32,7 @@ const ( Type_Options_field_name protoreflect.Name = "options" Type_SourceContext_field_name protoreflect.Name = "source_context" Type_Syntax_field_name protoreflect.Name = "syntax" + Type_Edition_field_name protoreflect.Name = "edition" Type_Name_field_fullname protoreflect.FullName = "google.protobuf.Type.name" Type_Fields_field_fullname protoreflect.FullName = "google.protobuf.Type.fields" @@ -39,6 +40,7 @@ const ( Type_Options_field_fullname protoreflect.FullName = "google.protobuf.Type.options" Type_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Type.source_context" Type_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Type.syntax" + Type_Edition_field_fullname protoreflect.FullName = "google.protobuf.Type.edition" ) // Field numbers for google.protobuf.Type. @@ -49,6 +51,7 @@ const ( Type_Options_field_number protoreflect.FieldNumber = 4 Type_SourceContext_field_number protoreflect.FieldNumber = 5 Type_Syntax_field_number protoreflect.FieldNumber = 6 + Type_Edition_field_number protoreflect.FieldNumber = 7 ) // Names for google.protobuf.Field. @@ -121,12 +124,14 @@ const ( Enum_Options_field_name protoreflect.Name = "options" Enum_SourceContext_field_name protoreflect.Name = "source_context" Enum_Syntax_field_name protoreflect.Name = "syntax" + Enum_Edition_field_name protoreflect.Name = "edition" Enum_Name_field_fullname protoreflect.FullName = "google.protobuf.Enum.name" Enum_Enumvalue_field_fullname protoreflect.FullName = "google.protobuf.Enum.enumvalue" Enum_Options_field_fullname protoreflect.FullName = "google.protobuf.Enum.options" Enum_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Enum.source_context" Enum_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Enum.syntax" + Enum_Edition_field_fullname protoreflect.FullName = "google.protobuf.Enum.edition" ) // Field numbers for google.protobuf.Enum. @@ -136,6 +141,7 @@ const ( Enum_Options_field_number protoreflect.FieldNumber = 3 Enum_SourceContext_field_number protoreflect.FieldNumber = 4 Enum_Syntax_field_number protoreflect.FieldNumber = 5 + Enum_Edition_field_number protoreflect.FieldNumber = 6 ) // Names for google.protobuf.EnumValue. 
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index 11a6128b..185ef2ef 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -59,7 +59,6 @@ func NewConverter(t reflect.Type, fd protoreflect.FieldDescriptor) Converter { default: return newSingularConverter(t, fd) } - panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) } var ( diff --git a/vendor/google.golang.org/protobuf/internal/order/order.go b/vendor/google.golang.org/protobuf/internal/order/order.go index 33745ed0..dea522e1 100644 --- a/vendor/google.golang.org/protobuf/internal/order/order.go +++ b/vendor/google.golang.org/protobuf/internal/order/order.go @@ -33,7 +33,7 @@ var ( return !inOneof(ox) && inOneof(oy) } // Fields in disjoint oneof sets are sorted by declaration index. - if ox != nil && oy != nil && ox != oy { + if inOneof(ox) && inOneof(oy) && ox != oy { return ox.Index() < oy.Index() } // Fields sorted by field number. diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index fea589c4..61a84d34 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -87,7 +87,7 @@ func (sb *Builder) grow(n int) { // Unlike strings.Builder, we do not need to copy over the contents // of the old buffer since our builder provides no API for // retrieving previously created strings. - sb.buf = make([]byte, 2*(cap(sb.buf)+n)) + sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) } func (sb *Builder) last(n int) string { diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index b480c501..0999f29d 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 28 - Patch = 1 + Minor = 31 + Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go index 08d2a46f..ec71e717 100644 --- a/vendor/google.golang.org/protobuf/proto/doc.go +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -5,16 +5,13 @@ // Package proto provides functions operating on protocol buffer messages. // // For documentation on protocol buffers in general, see: -// -// https://developers.google.com/protocol-buffers +// https://protobuf.dev. // // For a tutorial on using protocol buffers with Go, see: -// -// https://developers.google.com/protocol-buffers/docs/gotutorial +// https://protobuf.dev/getting-started/gotutorial. // // For a guide to generated Go protocol buffer code, see: -// -// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// https://protobuf.dev/reference/go/go-generated. 
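The strs.Builder fix above is a length-versus-capacity slip: make([]byte, n) produces n addressable zero bytes, so later appends land after stale data, while make([]byte, 0, n) produces an empty slice that can grow to n bytes without reallocating, which is what grow intends. A standalone illustration of the difference (plain Go, not tied to this module):

package main

import "fmt"

func main() {
	// Length 4: the append lands after four stale zero bytes.
	a := make([]byte, 4)
	a = append(a, 'x')
	fmt.Printf("%q\n", a) // "\x00\x00\x00\x00x"

	// Capacity 4, length 0: the append starts at the beginning.
	b := make([]byte, 0, 4)
	b = append(b, 'x')
	fmt.Printf("%q\n", b) // "x"
}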
// // # Binary serialization // diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 67948dd1..1a0be1b0 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -5,30 +5,39 @@ package proto import ( - "bytes" - "math" "reflect" - "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/reflect/protoreflect" ) -// Equal reports whether two messages are equal. -// If two messages marshal to the same bytes under deterministic serialization, -// then Equal is guaranteed to report true. +// Equal reports whether two messages are equal, +// by recursively comparing the fields of the message. // -// Two messages are equal if they belong to the same message descriptor, -// have the same set of populated known and extension field values, -// and the same set of unknown fields values. If either of the top-level -// messages are invalid, then Equal reports true only if both are invalid. +// - Bytes fields are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. // -// Scalar values are compared with the equivalent of the == operator in Go, -// except bytes values which are compared using bytes.Equal and -// floating point values which specially treat NaNs as equal. -// Message values are compared by recursively calling Equal. -// Lists are equal if each element value is also equal. -// Maps are equal if they have the same set of keys, where the pair of values -// for each key is also equal. +// - Floating-point fields are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Other scalar fields are equal if they contain the same value. +// +// - Message fields are equal if they have +// the same set of populated known and extension field values, and +// the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. +// +// An invalid message is not equal to a valid message. +// An invalid message is only equal to another invalid message of the +// same type. An invalid message often corresponds to a nil pointer +// of the concrete message type. For example, (*pb.M)(nil) is not equal +// to &pb.M{}. +// If two valid messages marshal to the same bytes under deterministic +// serialization, then Equal is guaranteed to report true. func Equal(x, y Message) bool { if x == nil || y == nil { return x == nil && y == nil @@ -42,130 +51,7 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } - return equalMessage(mx, my) -} - -// equalMessage compares two messages. -func equalMessage(mx, my protoreflect.Message) bool { - if mx.Descriptor() != my.Descriptor() { - return false - } - - nx := 0 - equal := true - mx.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { - nx++ - vy := my.Get(fd) - equal = my.Has(fd) && equalField(fd, vx, vy) - return equal - }) - if !equal { - return false - } - ny := 0 - my.Range(func(fd protoreflect.FieldDescriptor, vx protoreflect.Value) bool { - ny++ - return true - }) - if nx != ny { - return false - } - - return equalUnknown(mx.GetUnknown(), my.GetUnknown()) -} - -// equalField compares two fields. 
-func equalField(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { - switch { - case fd.IsList(): - return equalList(fd, x.List(), y.List()) - case fd.IsMap(): - return equalMap(fd, x.Map(), y.Map()) - default: - return equalValue(fd, x, y) - } -} - -// equalMap compares two maps. -func equalMap(fd protoreflect.FieldDescriptor, x, y protoreflect.Map) bool { - if x.Len() != y.Len() { - return false - } - equal := true - x.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { - vy := y.Get(k) - equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) - return equal - }) - return equal -} - -// equalList compares two lists. -func equalList(fd protoreflect.FieldDescriptor, x, y protoreflect.List) bool { - if x.Len() != y.Len() { - return false - } - for i := x.Len() - 1; i >= 0; i-- { - if !equalValue(fd, x.Get(i), y.Get(i)) { - return false - } - } - return true -} - -// equalValue compares two singular values. -func equalValue(fd protoreflect.FieldDescriptor, x, y protoreflect.Value) bool { - switch fd.Kind() { - case protoreflect.BoolKind: - return x.Bool() == y.Bool() - case protoreflect.EnumKind: - return x.Enum() == y.Enum() - case protoreflect.Int32Kind, protoreflect.Sint32Kind, - protoreflect.Int64Kind, protoreflect.Sint64Kind, - protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind: - return x.Int() == y.Int() - case protoreflect.Uint32Kind, protoreflect.Uint64Kind, - protoreflect.Fixed32Kind, protoreflect.Fixed64Kind: - return x.Uint() == y.Uint() - case protoreflect.FloatKind, protoreflect.DoubleKind: - fx := x.Float() - fy := y.Float() - if math.IsNaN(fx) || math.IsNaN(fy) { - return math.IsNaN(fx) && math.IsNaN(fy) - } - return fx == fy - case protoreflect.StringKind: - return x.String() == y.String() - case protoreflect.BytesKind: - return bytes.Equal(x.Bytes(), y.Bytes()) - case protoreflect.MessageKind, protoreflect.GroupKind: - return equalMessage(x.Message(), y.Message()) - default: - return x.Interface() == y.Interface() - } -} - -// equalUnknown compares unknown fields by direct comparison on the raw bytes -// of each individual field number. -func equalUnknown(x, y protoreflect.RawFields) bool { - if len(x) != len(y) { - return false - } - if bytes.Equal([]byte(x), []byte(y)) { - return true - } - - mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) - my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) - for len(x) > 0 { - fnum, _, n := protowire.ConsumeField(x) - mx[fnum] = append(mx[fnum], x[:n]...) - x = x[n:] - } - for len(y) > 0 { - fnum, _, n := protowire.ConsumeField(y) - my[fnum] = append(my[fnum], y[:n]...) 
- y = y[n:] - } - return reflect.DeepEqual(mx, my) + vx := protoreflect.ValueOfMessage(mx) + vy := protoreflect.ValueOfMessage(my) + return vx.Equal(vy) } diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go index 554b9c6c..f1692b49 100644 --- a/vendor/google.golang.org/protobuf/proto/size.go +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -73,23 +73,27 @@ func (o MarshalOptions) sizeField(fd protoreflect.FieldDescriptor, value protore } func (o MarshalOptions) sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + sizeTag := protowire.SizeTag(num) + if fd.IsPacked() && list.Len() > 0 { content := 0 for i, llen := 0, list.Len(); i < llen; i++ { content += o.sizeSingular(num, fd.Kind(), list.Get(i)) } - return protowire.SizeTag(num) + protowire.SizeBytes(content) + return sizeTag + protowire.SizeBytes(content) } for i, llen := 0, list.Len(); i < llen; i++ { - size += protowire.SizeTag(num) + o.sizeSingular(num, fd.Kind(), list.Get(i)) + size += sizeTag + o.sizeSingular(num, fd.Kind(), list.Get(i)) } return size } func (o MarshalOptions) sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + sizeTag := protowire.SizeTag(num) + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { - size += protowire.SizeTag(num) + size += sizeTag size += protowire.SizeBytes(o.sizeField(fd.MapKey(), key.Value()) + o.sizeField(fd.MapValue(), value)) return true }) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index b03c1223..717b106f 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -35,6 +35,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte { b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo) case 12: b = p.appendSingularField(b, "syntax", nil) + case 13: + b = p.appendSingularField(b, "edition", nil) } return b } @@ -236,6 +238,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte { b = p.appendSingularField(b, "deprecated", nil) case 7: b = p.appendSingularField(b, "map_entry", nil) + case 11: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -279,6 +283,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte { b = p.appendSingularField(b, "allow_alias", nil) case 3: b = p.appendSingularField(b, "deprecated", nil) + case 6: + b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil) case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -345,10 +351,20 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte { b = p.appendSingularField(b, "jstype", nil) case 5: b = p.appendSingularField(b, "lazy", nil) + case 15: + b = p.appendSingularField(b, "unverified_lazy", nil) case 3: b = p.appendSingularField(b, "deprecated", nil) case 10: b = p.appendSingularField(b, "weak", nil) + case 16: + b = p.appendSingularField(b, "debug_redact", nil) + case 17: + b = p.appendSingularField(b, "retention", nil) + case 18: + b = p.appendSingularField(b, "target", nil) + case 19: + b = p.appendRepeatedField(b, "targets", nil) case 999: b = 
p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) } @@ -404,6 +420,10 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte { switch (*p)[0] { case 999: b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption) + case 2: + b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration) + case 3: + b = p.appendSingularField(b, "verification", nil) } return b } @@ -459,3 +479,24 @@ func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte { } return b } + +func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte { + if len(*p) == 0 { + return b + } + switch (*p)[0] { + case 1: + b = p.appendSingularField(b, "number", nil) + case 2: + b = p.appendSingularField(b, "full_name", nil) + case 3: + b = p.appendSingularField(b, "type", nil) + case 4: + b = p.appendSingularField(b, "is_repeated", nil) + case 5: + b = p.appendSingularField(b, "reserved", nil) + case 6: + b = p.appendSingularField(b, "repeated", nil) + } + return b +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go index f3198107..37601b78 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -148,7 +148,7 @@ type Message interface { // be preserved in marshaling or other operations. IsValid() bool - // ProtoMethods returns optional fast-path implementions of various operations. + // ProtoMethods returns optional fast-path implementations of various operations. // This method may return nil. // // The returned methods type is identical to diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go new file mode 100644 index 00000000..59165254 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go @@ -0,0 +1,168 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "bytes" + "fmt" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +// Equal reports whether v1 and v2 are recursively equal. +// +// - Values of different types are always unequal. +// +// - Bytes values are equal if they contain identical bytes. +// Empty bytes (regardless of nil-ness) are considered equal. +// +// - Floating point values are equal if they contain the same value. +// Unlike the == operator, a NaN is equal to another NaN. +// +// - Enums are equal if they contain the same number. +// Since Value does not contain an enum descriptor, +// enum values do not consider the type of the enum. +// +// - Other scalar values are equal if they contain the same value. +// +// - Message values are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// - Lists are equal if they are the same length and +// each corresponding element is equal. +// +// - Maps are equal if they have the same set of keys and +// the corresponding value for each key is equal. 
+func (v1 Value) Equal(v2 Value) bool { + return equalValue(v1, v2) +} + +func equalValue(x, y Value) bool { + eqType := x.typ == y.typ + switch x.typ { + case nilType: + return eqType + case boolType: + return eqType && x.Bool() == y.Bool() + case int32Type, int64Type: + return eqType && x.Int() == y.Int() + case uint32Type, uint64Type: + return eqType && x.Uint() == y.Uint() + case float32Type, float64Type: + return eqType && equalFloat(x.Float(), y.Float()) + case stringType: + return eqType && x.String() == y.String() + case bytesType: + return eqType && bytes.Equal(x.Bytes(), y.Bytes()) + case enumType: + return eqType && x.Enum() == y.Enum() + default: + switch x := x.Interface().(type) { + case Message: + y, ok := y.Interface().(Message) + return ok && equalMessage(x, y) + case List: + y, ok := y.Interface().(List) + return ok && equalList(x, y) + case Map: + y, ok := y.Interface().(Map) + return ok && equalMap(x, y) + default: + panic(fmt.Sprintf("unknown type: %T", x)) + } + } +} + +// equalFloat compares two floats, where NaNs are treated as equal. +func equalFloat(x, y float64) bool { + if math.IsNaN(x) || math.IsNaN(y) { + return math.IsNaN(x) && math.IsNaN(y) + } + return x == y +} + +// equalMessage compares two messages. +func equalMessage(mx, my Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd FieldDescriptor, vx Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalValue(vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd FieldDescriptor, vx Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalList compares two lists. +func equalList(x, y List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalMap compares two maps. +func equalMap(x, y Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k MapKey, vx Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(vx, vy) + return equal + }) + return equal +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +func equalUnknown(x, y RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[FieldNumber]RawFields) + my := make(map[FieldNumber]RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index ca8e28c5..08e5ef73 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -54,11 +54,11 @@ import ( // // Append a 0 to a "repeated int32" field. // // Since the Value returned by Mutable is guaranteed to alias // // the source message, modifying the Value modifies the message. 
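With the new value_equal.go above, proto.Equal delegates to protoreflect's Value.Equal rather than carrying its own comparison logic. A small sketch of the documented semantics, assuming this vendored version; durationpb stands in for any generated message type:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	a := &durationpb.Duration{Seconds: 1}
	b := &durationpb.Duration{Seconds: 1}
	fmt.Println(proto.Equal(a, b)) // true

	// An invalid message (a typed nil pointer) is not equal to a valid
	// empty message of the same type.
	fmt.Println(proto.Equal((*durationpb.Duration)(nil), &durationpb.Duration{})) // false

	// Value.Equal compares protoreflect values directly.
	va := protoreflect.ValueOfMessage(a.ProtoReflect())
	vb := protoreflect.ValueOfMessage(b.ProtoReflect())
	fmt.Println(va.Equal(vb)) // true
}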
-// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// message.Mutable(fieldDesc).List().Append(protoreflect.ValueOfInt32(0)) // // // Assign [0] to a "repeated int32" field by creating a new Value, // // modifying it, and assigning it. -// list := message.NewField(fieldDesc).(List) +// list := message.NewField(fieldDesc).List() // list.Append(protoreflect.ValueOfInt32(0)) // message.Set(fieldDesc, list) // // ERROR: Since it is not defined whether Set aliases the source, diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go index 58352a69..aeb55977 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -46,7 +46,7 @@ var conflictPolicy = "panic" // "panic" | "warn" | "ignore" // It is a variable so that the behavior is easily overridden in another file. var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { const env = "GOLANG_PROTOBUF_REGISTRATION_CONFLICT" - const faq = "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict" + const faq = "https://protobuf.dev/reference/go/faq#namespace-conflict" policy := conflictPolicy if v := os.Getenv(env); v != "" { policy = v diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index abe4ab51..04c00f73 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -48,6 +48,64 @@ import ( sync "sync" ) +// The verification state of the extension range. +type ExtensionRangeOptions_VerificationState int32 + +const ( + // All the extensions of the range must be declared. + ExtensionRangeOptions_DECLARATION ExtensionRangeOptions_VerificationState = 0 + ExtensionRangeOptions_UNVERIFIED ExtensionRangeOptions_VerificationState = 1 +) + +// Enum value maps for ExtensionRangeOptions_VerificationState. +var ( + ExtensionRangeOptions_VerificationState_name = map[int32]string{ + 0: "DECLARATION", + 1: "UNVERIFIED", + } + ExtensionRangeOptions_VerificationState_value = map[string]int32{ + "DECLARATION": 0, + "UNVERIFIED": 1, + } +) + +func (x ExtensionRangeOptions_VerificationState) Enum() *ExtensionRangeOptions_VerificationState { + p := new(ExtensionRangeOptions_VerificationState) + *p = x + return p +} + +func (x ExtensionRangeOptions_VerificationState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() +} + +func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[0] +} + +func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *ExtensionRangeOptions_VerificationState) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = ExtensionRangeOptions_VerificationState(num) + return nil +} + +// Deprecated: Use ExtensionRangeOptions_VerificationState.Descriptor instead. 
+func (ExtensionRangeOptions_VerificationState) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + type FieldDescriptorProto_Type int32 const ( @@ -137,11 +195,11 @@ func (x FieldDescriptorProto_Type) String() string { } func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() } func (FieldDescriptorProto_Type) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[0] + return &file_google_protobuf_descriptor_proto_enumTypes[1] } func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber { @@ -197,11 +255,11 @@ func (x FieldDescriptorProto_Label) String() string { } func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() } func (FieldDescriptorProto_Label) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[1] + return &file_google_protobuf_descriptor_proto_enumTypes[2] } func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber { @@ -258,11 +316,11 @@ func (x FileOptions_OptimizeMode) String() string { } func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() } func (FileOptions_OptimizeMode) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[2] + return &file_google_protobuf_descriptor_proto_enumTypes[3] } func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber { @@ -288,7 +346,13 @@ type FieldOptions_CType int32 const ( // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_STRING FieldOptions_CType = 0 + // The option [ctype=CORD] may be applied to a non-repeated field of type + // "bytes". It indicates that in C++, the data should be stored in a Cord + // instead of a string. For very large strings, this may reduce memory + // fragmentation. It may also allow better performance when parsing from a + // Cord, or when parsing with aliasing enabled, as the parsed Cord may then + // alias the original buffer. 
FieldOptions_CORD FieldOptions_CType = 1 FieldOptions_STRING_PIECE FieldOptions_CType = 2 ) @@ -318,11 +382,11 @@ func (x FieldOptions_CType) String() string { } func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() } func (FieldOptions_CType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[3] + return &file_google_protobuf_descriptor_proto_enumTypes[4] } func (x FieldOptions_CType) Number() protoreflect.EnumNumber { @@ -380,11 +444,11 @@ func (x FieldOptions_JSType) String() string { } func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() } func (FieldOptions_JSType) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[4] + return &file_google_protobuf_descriptor_proto_enumTypes[5] } func (x FieldOptions_JSType) Number() protoreflect.EnumNumber { @@ -406,6 +470,152 @@ func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1} } +// If set to RETENTION_SOURCE, the option will be omitted from the binary. +// Note: as of January 2023, support for this is in progress and does not yet +// have an effect (b/264593489). +type FieldOptions_OptionRetention int32 + +const ( + FieldOptions_RETENTION_UNKNOWN FieldOptions_OptionRetention = 0 + FieldOptions_RETENTION_RUNTIME FieldOptions_OptionRetention = 1 + FieldOptions_RETENTION_SOURCE FieldOptions_OptionRetention = 2 +) + +// Enum value maps for FieldOptions_OptionRetention. +var ( + FieldOptions_OptionRetention_name = map[int32]string{ + 0: "RETENTION_UNKNOWN", + 1: "RETENTION_RUNTIME", + 2: "RETENTION_SOURCE", + } + FieldOptions_OptionRetention_value = map[string]int32{ + "RETENTION_UNKNOWN": 0, + "RETENTION_RUNTIME": 1, + "RETENTION_SOURCE": 2, + } +) + +func (x FieldOptions_OptionRetention) Enum() *FieldOptions_OptionRetention { + p := new(FieldOptions_OptionRetention) + *p = x + return p +} + +func (x FieldOptions_OptionRetention) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor() +} + +func (FieldOptions_OptionRetention) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[6] +} + +func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionRetention) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionRetention(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionRetention.Descriptor instead. +func (FieldOptions_OptionRetention) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 2} +} + +// This indicates the types of entities that the field may apply to when used +// as an option. If it is unset, then the field may be freely used as an +// option on any kind of entity. 
Note: as of January 2023, support for this is +// in progress and does not yet have an effect (b/264593489). +type FieldOptions_OptionTargetType int32 + +const ( + FieldOptions_TARGET_TYPE_UNKNOWN FieldOptions_OptionTargetType = 0 + FieldOptions_TARGET_TYPE_FILE FieldOptions_OptionTargetType = 1 + FieldOptions_TARGET_TYPE_EXTENSION_RANGE FieldOptions_OptionTargetType = 2 + FieldOptions_TARGET_TYPE_MESSAGE FieldOptions_OptionTargetType = 3 + FieldOptions_TARGET_TYPE_FIELD FieldOptions_OptionTargetType = 4 + FieldOptions_TARGET_TYPE_ONEOF FieldOptions_OptionTargetType = 5 + FieldOptions_TARGET_TYPE_ENUM FieldOptions_OptionTargetType = 6 + FieldOptions_TARGET_TYPE_ENUM_ENTRY FieldOptions_OptionTargetType = 7 + FieldOptions_TARGET_TYPE_SERVICE FieldOptions_OptionTargetType = 8 + FieldOptions_TARGET_TYPE_METHOD FieldOptions_OptionTargetType = 9 +) + +// Enum value maps for FieldOptions_OptionTargetType. +var ( + FieldOptions_OptionTargetType_name = map[int32]string{ + 0: "TARGET_TYPE_UNKNOWN", + 1: "TARGET_TYPE_FILE", + 2: "TARGET_TYPE_EXTENSION_RANGE", + 3: "TARGET_TYPE_MESSAGE", + 4: "TARGET_TYPE_FIELD", + 5: "TARGET_TYPE_ONEOF", + 6: "TARGET_TYPE_ENUM", + 7: "TARGET_TYPE_ENUM_ENTRY", + 8: "TARGET_TYPE_SERVICE", + 9: "TARGET_TYPE_METHOD", + } + FieldOptions_OptionTargetType_value = map[string]int32{ + "TARGET_TYPE_UNKNOWN": 0, + "TARGET_TYPE_FILE": 1, + "TARGET_TYPE_EXTENSION_RANGE": 2, + "TARGET_TYPE_MESSAGE": 3, + "TARGET_TYPE_FIELD": 4, + "TARGET_TYPE_ONEOF": 5, + "TARGET_TYPE_ENUM": 6, + "TARGET_TYPE_ENUM_ENTRY": 7, + "TARGET_TYPE_SERVICE": 8, + "TARGET_TYPE_METHOD": 9, + } +) + +func (x FieldOptions_OptionTargetType) Enum() *FieldOptions_OptionTargetType { + p := new(FieldOptions_OptionTargetType) + *p = x + return p +} + +func (x FieldOptions_OptionTargetType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor() +} + +func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[7] +} + +func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FieldOptions_OptionTargetType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FieldOptions_OptionTargetType(num) + return nil +} + +// Deprecated: Use FieldOptions_OptionTargetType.Descriptor instead. +func (FieldOptions_OptionTargetType) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 3} +} + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe // methods, and PUT verb for idempotent methods instead of the default POST. 
@@ -442,11 +652,11 @@ func (x MethodOptions_IdempotencyLevel) String() string { } func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor() } func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[5] + return &file_google_protobuf_descriptor_proto_enumTypes[8] } func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber { @@ -468,6 +678,70 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0} } +// Represents the identified object's effect on the element in the original +// .proto file. +type GeneratedCodeInfo_Annotation_Semantic int32 + +const ( + // There is no effect or the effect is indescribable. + GeneratedCodeInfo_Annotation_NONE GeneratedCodeInfo_Annotation_Semantic = 0 + // The element is set or otherwise mutated. + GeneratedCodeInfo_Annotation_SET GeneratedCodeInfo_Annotation_Semantic = 1 + // An alias to the element is returned. + GeneratedCodeInfo_Annotation_ALIAS GeneratedCodeInfo_Annotation_Semantic = 2 +) + +// Enum value maps for GeneratedCodeInfo_Annotation_Semantic. +var ( + GeneratedCodeInfo_Annotation_Semantic_name = map[int32]string{ + 0: "NONE", + 1: "SET", + 2: "ALIAS", + } + GeneratedCodeInfo_Annotation_Semantic_value = map[string]int32{ + "NONE": 0, + "SET": 1, + "ALIAS": 2, + } +) + +func (x GeneratedCodeInfo_Annotation_Semantic) Enum() *GeneratedCodeInfo_Annotation_Semantic { + p := new(GeneratedCodeInfo_Annotation_Semantic) + *p = x + return p +} + +func (x GeneratedCodeInfo_Annotation_Semantic) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor() +} + +func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[9] +} + +func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = GeneratedCodeInfo_Annotation_Semantic(num) + return nil +} + +// Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead. +func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0} +} + // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { @@ -544,8 +818,12 @@ type FileDescriptorProto struct { // development tools. SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. - // The supported values are "proto2" and "proto3". + // The supported values are "proto2", "proto3", and "editions". + // + // If `edition` is present, this value must be "editions". Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + // The edition of the proto file, which is an opaque string. 
+ Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"` } func (x *FileDescriptorProto) Reset() { @@ -664,6 +942,13 @@ func (x *FileDescriptorProto) GetSyntax() string { return "" } +func (x *FileDescriptorProto) GetEdition() string { + if x != nil && x.Edition != nil { + return *x.Edition + } + return "" +} + // Describes a message type. type DescriptorProto struct { state protoimpl.MessageState @@ -794,7 +1079,21 @@ type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` -} + // go/protobuf-stripping-extension-declarations + // Like Metadata, but we use a repeated field to hold all extension + // declarations. This should avoid the size increases of transforming a large + // extension range into small ranges in generated binaries. + Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"` + // The verification state of the range. + // TODO(b/278783756): flip the default to DECLARATION once all empty ranges + // are marked as UNVERIFIED. + Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"` +} + +// Default values for ExtensionRangeOptions fields. +const ( + Default_ExtensionRangeOptions_Verification = ExtensionRangeOptions_UNVERIFIED +) func (x *ExtensionRangeOptions) Reset() { *x = ExtensionRangeOptions{} @@ -835,6 +1134,20 @@ func (x *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption return nil } +func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declaration { + if x != nil { + return x.Declaration + } + return nil +} + +func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState { + if x != nil && x.Verification != nil { + return *x.Verification + } + return Default_ExtensionRangeOptions_Verification +} + // Describes a field within a message. type FieldDescriptorProto struct { state protoimpl.MessageState @@ -860,7 +1173,6 @@ type FieldDescriptorProto struct { // For booleans, "true" or "false". // For strings, contains the default text contents (not escaped in any way). // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` // If set, gives the index of a oneof in the containing type's oneof_decl // list. This field is a member of that oneof. @@ -1382,22 +1694,22 @@ type FileOptions struct { // inappropriate because proto packages do not normally start with backwards // domain names. JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). + // Controls the name of the wrapper Java class generated for the .proto file. 
+ // That class will always contain the .proto file's getDescriptor() method as + // well as any top-level extensions defined in the .proto file. + // If java_multiple_files is disabled, then all the other classes from the + // .proto file will be nested inside the single wrapper outer class. JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java + // If enabled, then the Java code generator will generate a separate .java // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be + // file. Thus, these types will *not* be nested inside the wrapper class + // named by java_outer_classname. However, the wrapper class will still be // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. // - // Deprecated: Do not use. + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 @@ -1531,7 +1843,7 @@ func (x *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } -// Deprecated: Do not use. +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. func (x *FileOptions) GetJavaGenerateEqualsAndHash() bool { if x != nil && x.JavaGenerateEqualsAndHash != nil { return *x.JavaGenerateEqualsAndHash @@ -1670,10 +1982,12 @@ type MessageOptions struct { // efficient, has fewer features, and is more complicated. // // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } + // + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // // Note that the message cannot have any defined fields; MessageSets only // have extensions. // @@ -1692,28 +2006,44 @@ type MessageOptions struct { // for the message, or it will be completely ignored; in the very least, // this is a formalization for deprecating messages. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + // // Whether the message is an automatically generated map entry type for the // maps field. 
// // For maps fields: - // map map_field = 1; + // + // map map_field = 1; + // // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; + // + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; // // Implementations may choose not to generate the map_entry=true message, but // use a native map in the target language to hold the keys and values. // The reflection APIs in such implementations still need to work as // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // + // This should only be used as a temporary measure against broken builds due + // to the change in behavior for JSON field name conflicts. + // + // TODO(b/261750190) This is legacy behavior we plan to remove once downstream + // teams have had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -1785,6 +2115,14 @@ func (x *MessageOptions) GetMapEntry() bool { return false } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -1800,8 +2138,10 @@ type FieldOptions struct { // The ctype option instructs the C++ code generator to use a different // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! + // options below. This option is only implemented to support use of + // [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of + // type "bytes" in the open source release -- sorry, we'll try to include + // other types in a future version! Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` // The packed option can be enabled for repeated primitive fields to enable // a more efficient representation on the wire. 
Rather than repeatedly @@ -1838,7 +2178,6 @@ type FieldOptions struct { // call from multiple threads concurrently, while non-const methods continue // to require exclusive access. // - // // Note that implementations may choose not to check required fields within // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. @@ -1849,7 +2188,14 @@ type FieldOptions struct { // implementation must either *always* check its required fields, or *never* // check its required fields, regardless of whether or not the message has // been parsed. + // + // As of May 2022, lazy verifies the contents of the byte stream during + // parsing. An invalid byte stream will cause the overall parsing to fail. Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // unverified_lazy does no correctness checks on the byte stream. This should + // only be used where lazy with verification is prohibitive for performance + // reasons. + UnverifiedLazy *bool `protobuf:"varint,15,opt,name=unverified_lazy,json=unverifiedLazy,def=0" json:"unverified_lazy,omitempty"` // Is this field deprecated? // Depending on the target platform, this can emit Deprecated annotations // for accessors, or it will be completely ignored; in the very least, this @@ -1857,17 +2203,26 @@ type FieldOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // For Google-internal migration only. Do not use. Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // Indicate that the field value should not be printed out when using debug + // formats, e.g. when the field contains sensitive credentials. + DebugRedact *bool `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"` + Retention *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"` + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + Target *FieldOptions_OptionTargetType `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"` + Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } // Default values for FieldOptions fields. 
const ( - Default_FieldOptions_Ctype = FieldOptions_STRING - Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL - Default_FieldOptions_Lazy = bool(false) - Default_FieldOptions_Deprecated = bool(false) - Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_Ctype = FieldOptions_STRING + Default_FieldOptions_Jstype = FieldOptions_JS_NORMAL + Default_FieldOptions_Lazy = bool(false) + Default_FieldOptions_UnverifiedLazy = bool(false) + Default_FieldOptions_Deprecated = bool(false) + Default_FieldOptions_Weak = bool(false) + Default_FieldOptions_DebugRedact = bool(false) ) func (x *FieldOptions) Reset() { @@ -1930,6 +2285,13 @@ func (x *FieldOptions) GetLazy() bool { return Default_FieldOptions_Lazy } +func (x *FieldOptions) GetUnverifiedLazy() bool { + if x != nil && x.UnverifiedLazy != nil { + return *x.UnverifiedLazy + } + return Default_FieldOptions_UnverifiedLazy +} + func (x *FieldOptions) GetDeprecated() bool { if x != nil && x.Deprecated != nil { return *x.Deprecated @@ -1944,6 +2306,35 @@ func (x *FieldOptions) GetWeak() bool { return Default_FieldOptions_Weak } +func (x *FieldOptions) GetDebugRedact() bool { + if x != nil && x.DebugRedact != nil { + return *x.DebugRedact + } + return Default_FieldOptions_DebugRedact +} + +func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention { + if x != nil && x.Retention != nil { + return *x.Retention + } + return FieldOptions_RETENTION_UNKNOWN +} + +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType { + if x != nil && x.Target != nil { + return *x.Target + } + return FieldOptions_TARGET_TYPE_UNKNOWN +} + +func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType { + if x != nil { + return x.Targets + } + return nil +} + func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2014,6 +2405,15 @@ type EnumOptions struct { // for the enum, or it will be completely ignored; in the very least, this // is a formalization for deprecating enums. Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enable the legacy handling of JSON field name conflicts. This lowercases + // and strips underscored from the fields before comparison in proto3 only. + // The new behavior takes `json_name` into account and applies to proto2 as + // well. + // TODO(b/261750190) Remove this legacy behavior once downstream teams have + // had time to migrate. + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` } @@ -2069,6 +2469,14 @@ func (x *EnumOptions) GetDeprecated() bool { return Default_EnumOptions_Deprecated } +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. 
+func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool { + if x != nil && x.DeprecatedLegacyJsonFieldConflicts != nil { + return *x.DeprecatedLegacyJsonFieldConflicts + } + return false +} + func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { if x != nil { return x.UninterpretedOption @@ -2399,43 +2807,48 @@ type SourceCodeInfo struct { // tools. // // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } + // + // message Foo { + // optional string foo = 1; + // } + // // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi + // + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). // // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` } @@ -2651,6 +3064,108 @@ func (x *DescriptorProto_ReservedRange) GetEnd() int32 { return 0 } +type ExtensionRangeOptions_Declaration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension number declared within the extension range. + Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"` + // The fully-qualified name of the extension field. There must be a leading + // dot in front of the full name. + FullName *string `protobuf:"bytes,2,opt,name=full_name,json=fullName" json:"full_name,omitempty"` + // The fully-qualified type name of the extension field. Unlike + // Metadata.type, Declaration.type must have a leading dot for messages + // and enums. + Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"` + // Deprecated. Please use "repeated". + // + // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. + IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"` + // If true, indicates that the number is reserved in the extension range, + // and any extension field with the number will fail to compile. Set this + // when a declared extension field is deleted. + Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"` + // If true, indicates that the extension must be defined as repeated. + // Otherwise the extension must be defined as optional. + Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"` +} + +func (x *ExtensionRangeOptions_Declaration) Reset() { + *x = ExtensionRangeOptions_Declaration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionRangeOptions_Declaration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionRangeOptions_Declaration) ProtoMessage() {} + +func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionRangeOptions_Declaration.ProtoReflect.Descriptor instead. 
+func (*ExtensionRangeOptions_Declaration) Descriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ExtensionRangeOptions_Declaration) GetNumber() int32 { + if x != nil && x.Number != nil { + return *x.Number + } + return 0 +} + +func (x *ExtensionRangeOptions_Declaration) GetFullName() string { + if x != nil && x.FullName != nil { + return *x.FullName + } + return "" +} + +func (x *ExtensionRangeOptions_Declaration) GetType() string { + if x != nil && x.Type != nil { + return *x.Type + } + return "" +} + +// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. +func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool { + if x != nil && x.IsRepeated != nil { + return *x.IsRepeated + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetReserved() bool { + if x != nil && x.Reserved != nil { + return *x.Reserved + } + return false +} + +func (x *ExtensionRangeOptions_Declaration) GetRepeated() bool { + if x != nil && x.Repeated != nil { + return *x.Repeated + } + return false +} + // Range of reserved numeric values. Reserved values may not be used by // entries in the same enum. Reserved ranges may not overlap. // @@ -2669,7 +3184,7 @@ type EnumDescriptorProto_EnumReservedRange struct { func (x *EnumDescriptorProto_EnumReservedRange) Reset() { *x = EnumDescriptorProto_EnumReservedRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2682,7 +3197,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string { func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[23] + mi := &file_google_protobuf_descriptor_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2715,8 +3230,8 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { // The name of the uninterpreted option. Each string represents a segment in // a dot-separated name. is_extension is true iff a segment represents an // extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". +// E.g.,{ ["foo", false], ["bar.baz", true], ["moo", false] } represents +// "foo.(bar.baz).moo". 
type UninterpretedOption_NamePart struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2729,7 +3244,7 @@ type UninterpretedOption_NamePart struct { func (x *UninterpretedOption_NamePart) Reset() { *x = UninterpretedOption_NamePart{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2742,7 +3257,7 @@ func (x *UninterpretedOption_NamePart) String() string { func (*UninterpretedOption_NamePart) ProtoMessage() {} func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[24] + mi := &file_google_protobuf_descriptor_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2781,23 +3296,34 @@ type SourceCodeInfo_Location struct { // location. // // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] + // the root FileDescriptorProto to the place where the definition occurs. + // For example, this path: + // + // [ 4, 3, 2, 7, 1 ] + // // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 + // + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; + // + // repeated DescriptorProto message_type = 4; + // // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; + // + // repeated FieldDescriptorProto field = 2; + // // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; + // + // optional string name = 1; // // Thus, the above path gives the location of a field name. If we removed // the last element: - // [ 4, 3, 2, 7 ] + // + // [ 4, 3, 2, 7 ] + // // this path refers to the whole field declaration (from the beginning // of the label to the terminating semicolon). Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` @@ -2826,34 +3352,34 @@ type SourceCodeInfo_Location struct { // // Examples: // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; + // // Comment attached to moo. + // // + // // Another line attached to moo. + // optional double moo = 4; // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. + // // Detached comment for corge. This is not leading or trailing comments + // // to moo or corge because there are blank lines separating it from + // // both. // - // // Detached comment for corge paragraph 2. + // // Detached comment for corge paragraph 2. 
// - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; // - // // ignored detached comments. + // // ignored detached comments. LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` @@ -2862,7 +3388,7 @@ type SourceCodeInfo_Location struct { func (x *SourceCodeInfo_Location) Reset() { *x = SourceCodeInfo_Location{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2875,7 +3401,7 @@ func (x *SourceCodeInfo_Location) String() string { func (*SourceCodeInfo_Location) ProtoMessage() {} func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[25] + mi := &file_google_protobuf_descriptor_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2940,15 +3466,16 @@ type GeneratedCodeInfo_Annotation struct { // that relates to the identified object. Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past + // relates to the identified object. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
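+	// Illustrative values: begin=10, end=16 marks the six bytes
+	// content[10:16] of the generated file, since end is one past the
+	// last relevant byte.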
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"` } func (x *GeneratedCodeInfo_Annotation) Reset() { *x = GeneratedCodeInfo_Annotation{} if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2961,7 +3488,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string { func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_descriptor_proto_msgTypes[26] + mi := &file_google_protobuf_descriptor_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3005,6 +3532,13 @@ func (x *GeneratedCodeInfo_Annotation) GetEnd() int32 { return 0 } +func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotation_Semantic { + if x != nil && x.Semantic != nil { + return *x.Semantic + } + return GeneratedCodeInfo_Annotation_NONE +} + var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor var file_google_protobuf_descriptor_proto_rawDesc = []byte{ @@ -3016,7 +3550,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x22, 0xe4, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, @@ -3054,330 +3588,423 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, - 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, - 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, + 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, - 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 
0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, + 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x7c, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 
0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, - 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, - 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, - 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, - 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, - 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, - 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, - 
0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, - 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, - 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, - 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, - 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, - 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, - 0x44, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, - 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, + 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, + 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, + 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, + 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 
0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, + 0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, + 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68, + 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a, + 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, + 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 
0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65, + 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34, + 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, + 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, + 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, + 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, + 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, - 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, - 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, - 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, + 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, + 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, + 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, + 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, + 
0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, + 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, + 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, + 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, + 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, + 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, + 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, + 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, + 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, + 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, + 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, + 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, + 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, + 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, + 0x04, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, + 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, + 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, - 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, - 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, - 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, - 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, - 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, - 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, - 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, - 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 
0x53, 0x0a, 0x0c, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, - 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, - 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, - 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, - 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, - 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, - 0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, - 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, - 0x68, 
0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, - 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, - 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, - 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, - 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, - 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, - 0x10, 0x27, 0x22, 0xd1, 0x02, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, - 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, - 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, - 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, - 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xe2, 0x03, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, - 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, + 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 
0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46, + 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, + 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, + 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, + 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, + 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, + 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, + 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, + 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, + 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, + 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, + 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, + 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, + 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, + 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 
0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, + 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, + 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, + 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, + 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, + 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, + 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, + 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, + 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, + 0x49, 
0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, + 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb, + 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, + 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, + 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, + 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a, + 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, + 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, + 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x06, 0x70, 0x61, 0x63, 
0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, + 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, + 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, + 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, + 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, + 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, + 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, - 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, - 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, - 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, - 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 
0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, - 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, - 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, - 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, - 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, + 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, - 0x22, 0xc0, 0x01, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, - 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, - 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 
0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, + 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, + 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, + 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, + 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, + 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, + 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, + 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, + 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, + 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, + 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, + 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, + 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, + 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, + 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, + 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, + 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, + 0x08, 0xe8, 0x07, 
0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e, + 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, + 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, + 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, + 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, + 0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, + 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, + 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, @@ -3385,97 +4012,95 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 
0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, - 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, + 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, + 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, + 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, + 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, + 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, + 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, + 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, + 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, + 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, + 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, + 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, + 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, + 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 
0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, - 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, - 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, - 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, - 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, - 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, - 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, - 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, - 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, - 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, - 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, - 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x05, 0x20, 0x01, 
0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, - 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, - 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, - 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, - 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, - 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, - 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, - 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd1, 0x01, - 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 
0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, - 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, - 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, - 0x64, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, + 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, + 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, + 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, + 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, + 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, + 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, + 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, + 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, + 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 
0x6e, 0x1a, 0xce, 0x01, + 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, + 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, + 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, + 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, + 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, + 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, + 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, + 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, + 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, + 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, + 0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 
0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, @@ -3498,92 +4123,103 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 6) -var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10) +var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28) var file_google_protobuf_descriptor_proto_goTypes = []interface{}{ - (FieldDescriptorProto_Type)(0), // 0: google.protobuf.FieldDescriptorProto.Type - (FieldDescriptorProto_Label)(0), // 1: google.protobuf.FieldDescriptorProto.Label - (FileOptions_OptimizeMode)(0), // 2: google.protobuf.FileOptions.OptimizeMode - (FieldOptions_CType)(0), // 3: google.protobuf.FieldOptions.CType - (FieldOptions_JSType)(0), // 4: google.protobuf.FieldOptions.JSType - (MethodOptions_IdempotencyLevel)(0), // 5: google.protobuf.MethodOptions.IdempotencyLevel - (*FileDescriptorSet)(nil), // 6: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 7: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 8: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 9: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 10: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 11: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 12: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 13: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 14: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 15: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 16: google.protobuf.FileOptions - (*MessageOptions)(nil), // 17: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 18: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 19: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 20: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 21: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 22: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 23: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 24: google.protobuf.UninterpretedOption - (*SourceCodeInfo)(nil), // 25: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 26: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 27: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 28: google.protobuf.DescriptorProto.ReservedRange - (*EnumDescriptorProto_EnumReservedRange)(nil), // 29: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*UninterpretedOption_NamePart)(nil), // 30: google.protobuf.UninterpretedOption.NamePart - (*SourceCodeInfo_Location)(nil), // 31: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 32: google.protobuf.GeneratedCodeInfo.Annotation + (ExtensionRangeOptions_VerificationState)(0), // 0: google.protobuf.ExtensionRangeOptions.VerificationState + (FieldDescriptorProto_Type)(0), // 1: google.protobuf.FieldDescriptorProto.Type + (FieldDescriptorProto_Label)(0), // 2: google.protobuf.FieldDescriptorProto.Label + 
(FileOptions_OptimizeMode)(0), // 3: google.protobuf.FileOptions.OptimizeMode + (FieldOptions_CType)(0), // 4: google.protobuf.FieldOptions.CType + (FieldOptions_JSType)(0), // 5: google.protobuf.FieldOptions.JSType + (FieldOptions_OptionRetention)(0), // 6: google.protobuf.FieldOptions.OptionRetention + (FieldOptions_OptionTargetType)(0), // 7: google.protobuf.FieldOptions.OptionTargetType + (MethodOptions_IdempotencyLevel)(0), // 8: google.protobuf.MethodOptions.IdempotencyLevel + (GeneratedCodeInfo_Annotation_Semantic)(0), // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 10: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 11: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 12: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 13: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 14: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 15: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 16: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 17: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 18: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 19: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 20: google.protobuf.FileOptions + (*MessageOptions)(nil), // 21: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 22: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 23: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 24: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 25: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 26: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 27: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 28: google.protobuf.UninterpretedOption + (*SourceCodeInfo)(nil), // 29: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 30: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 31: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 32: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 33: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*UninterpretedOption_NamePart)(nil), // 35: google.protobuf.UninterpretedOption.NamePart + (*SourceCodeInfo_Location)(nil), // 36: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 37: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 7, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 8, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 12, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 14, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 10, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 16, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 25, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo - 10, // 7: 
google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 10, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 8, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 12, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 27, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 11, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 17, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 28, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 24, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 1, // 16: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label - 0, // 17: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 18, // 18: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 19, // 19: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 13, // 20: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 20, // 21: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 29, // 22: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 21, // 23: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 15, // 24: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 22, // 25: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 23, // 26: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions - 2, // 27: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 24, // 28: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 29: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 3, // 30: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType - 4, // 31: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType - 24, // 32: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 33: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 34: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 35: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 24, // 36: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 5, // 37: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 24, // 38: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 30, // 39: google.protobuf.UninterpretedOption.name:type_name -> 
google.protobuf.UninterpretedOption.NamePart - 31, // 40: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 32, // 41: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 9, // 42: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 43, // [43:43] is the sub-list for method output_type - 43, // [43:43] is the sub-list for method input_type - 43, // [43:43] is the sub-list for extension type_name - 43, // [43:43] is the sub-list for extension extendee - 0, // [0:43] is the sub-list for field type_name + 11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange + 28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 0, // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState + 2, // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label + 1, // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type + 22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 19, // 26: 
google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 3, // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode + 28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 4, // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType + 5, // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType + 6, // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention + 7, // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType + 7, // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType + 28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 8, // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel + 28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 9, // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 49, // [49:49] is the sub-list for method output_type + 49, // [49:49] is the sub-list for method input_type + 49, // [49:49] is the sub-list for extension type_name + 49, // [49:49] is the sub-list for extension extendee + 0, // [0:49] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -3887,7 +4523,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { + switch v := v.(*ExtensionRangeOptions_Declaration); i { case 0: return &v.state case 1: @@ -3899,7 +4535,7 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UninterpretedOption_NamePart); i { + switch v := v.(*EnumDescriptorProto_EnumReservedRange); i { case 0: return &v.state case 1: @@ -3911,7 +4547,7 
@@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SourceCodeInfo_Location); i { + switch v := v.(*UninterpretedOption_NamePart); i { case 0: return &v.state case 1: @@ -3923,6 +4559,18 @@ func file_google_protobuf_descriptor_proto_init() { } } file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SourceCodeInfo_Location); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GeneratedCodeInfo_Annotation); i { case 0: return &v.state @@ -3940,8 +4588,8 @@ func file_google_protobuf_descriptor_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 6, - NumMessages: 27, + NumEnums: 10, + NumMessages: 28, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 8c10797b..580b232f 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -37,8 +37,7 @@ // It is functionally a tuple of the full name of the remote message type and // the serialized bytes of the remote message value. // -// -// Constructing an Any +// # Constructing an Any // // An Any message containing another message value is constructed using New: // @@ -48,8 +47,7 @@ // } // ... // make use of any // -// -// Unmarshaling an Any +// # Unmarshaling an Any // // With a populated Any message, the underlying message can be serialized into // a remote concrete message value in a few ways. @@ -95,8 +93,7 @@ // listed in the case clauses are linked into the Go binary and therefore also // registered in the global registry. // -// -// Type checking an Any +// # Type checking an Any // // In order to type check whether an Any message represents some other message, // then use the MessageIs method: @@ -115,7 +112,6 @@ // } // ... // make use of m // } -// package anypb import ( @@ -136,45 +132,49 @@ import ( // // Example 1: Pack and unpack a message in C++. // -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } // // Example 2: Pack and unpack a message in Java. // -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := anypb.New(foo) -// if err != nil { -// ... -// } -// ... -// foo := &pb.Foo{} -// if err := any.UnmarshalTo(foo); err != nil { -// ... -// } +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// // or ... 
+// if (any.isSameTypeAs(Foo.getDefaultInstance())) { +// foo = any.unpack(Foo.getDefaultInstance()); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := anypb.New(foo) +// if err != nil { +// ... +// } +// ... +// foo := &pb.Foo{} +// if err := any.UnmarshalTo(foo); err != nil { +// ... +// } // // The pack methods provided by protobuf library will by default use // 'type.googleapis.com/full.type.name' as the type URL and the unpack @@ -182,35 +182,33 @@ import ( // in the type URL, for example "foo.bar.com/x/y.z" will yield type // name "y.z". // -// // JSON // ==== // The JSON representation of an `Any` value uses the regular // representation of the deserialized, embedded message, with an // additional field `@type` which contains the type URL. Example: // -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } // -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } // // If the embedded message type is well-known and has a custom JSON // representation, that representation will be embedded adding a field // `value` which holds the custom JSON in addition to the `@type` // field. Example (for message [google.protobuf.Duration][]): // -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } type Any struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -228,14 +226,14 @@ type Any struct { // scheme `http`, `https`, or no scheme, one can optionally set up a type // server that maps type URLs to message definitions as follows: // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) + // - If no scheme is provided, `https` is assumed. + // - An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // - Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) // // Note: this functionality is not currently available in the official // protobuf release, and it is not used for type URLs beginning with @@ -243,7 +241,6 @@ type Any struct { // // Schemes other than `http`, `https` (or the empty scheme) might be // used with implementation specific semantics. - // TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. 
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index a583ca2f..df709a8d 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -35,8 +35,7 @@ // // The Duration message represents a signed span of time. // -// -// Conversion to a Go Duration +// # Conversion to a Go Duration // // The AsDuration method can be used to convert a Duration message to a // standard Go time.Duration value: @@ -65,15 +64,13 @@ // the resulting value to the closest representable value (e.g., math.MaxInt64 // for positive overflow and math.MinInt64 for negative overflow). // -// -// Conversion from a Go Duration +// # Conversion from a Go Duration // // The durationpb.New function can be used to construct a Duration message // from a standard Go time.Duration value: // // dur := durationpb.New(d) // ... // make use of d as a *durationpb.Duration -// package durationpb import ( @@ -96,43 +93,43 @@ import ( // // Example 1: Compute Duration from two Timestamps in pseudo code. // -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; // -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; // -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (duration.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } // // Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. // -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; // -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; // -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } // // Example 3: Compute Duration from datetime.timedelta in Python. // -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) // // # JSON Mapping // @@ -143,8 +140,6 @@ import ( // encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should // be expressed in JSON format as "3.000000001s", and 3 seconds and 1 // microsecond should be expressed in JSON format as "3.000001s". 
-// -// type Duration struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index c9ae9213..81511a33 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -36,8 +36,7 @@ // The Timestamp message represents a timestamp, // an instant in time since the Unix epoch (January 1st, 1970). // -// -// Conversion to a Go Time +// # Conversion to a Go Time // // The AsTime method can be used to convert a Timestamp message to a // standard Go time.Time value in UTC: @@ -59,8 +58,7 @@ // ... // handle error // } // -// -// Conversion from a Go Time +// # Conversion from a Go Time // // The timestamppb.New function can be used to construct a Timestamp message // from a standard Go time.Time value: @@ -72,7 +70,6 @@ // // ts := timestamppb.Now() // ... // make use of ts as a *timestamppb.Timestamp -// package timestamppb import ( @@ -101,52 +98,50 @@ import ( // // Example 1: Compute Timestamp from POSIX `time()`. // -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); // // Example 2: Compute Timestamp from POSIX `gettimeofday()`. // -// struct timeval tv; -// gettimeofday(&tv, NULL); +// struct timeval tv; +// gettimeofday(&tv, NULL); // -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); // // Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. // -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; // -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); // // Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. // -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// long millis = System.currentTimeMillis(); // +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); // // Example 5: Compute Timestamp from Java `Instant.now()`. 
// -// Instant now = Instant.now(); -// -// Timestamp timestamp = -// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) -// .setNanos(now.getNano()).build(); +// Instant now = Instant.now(); // +// Timestamp timestamp = +// Timestamp.newBuilder().setSeconds(now.getEpochSecond()) +// .setNanos(now.getNano()).build(); // // Example 6: Compute Timestamp from current time in Python. // -// timestamp = Timestamp() -// timestamp.GetCurrentTime() +// timestamp = Timestamp() +// timestamp.GetCurrentTime() // // # JSON Mapping // @@ -172,10 +167,8 @@ import ( // [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with // the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use // the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime() // ) to obtain a formatter capable of generating timestamps in this format. -// -// type Timestamp struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/kmodules.xyz/client-go/Makefile b/vendor/kmodules.xyz/client-go/Makefile index c05270cd..7ba38d5d 100644 --- a/vendor/kmodules.xyz/client-go/Makefile +++ b/vendor/kmodules.xyz/client-go/Makefile @@ -58,7 +58,7 @@ ARCH := $(if $(GOARCH),$(GOARCH),$(shell go env GOARCH)) BASEIMAGE_PROD ?= gcr.io/distroless/static-debian11 BASEIMAGE_DBG ?= debian:bullseye -GO_VERSION ?= 1.20 +GO_VERSION ?= 1.21 BUILD_IMAGE ?= ghcr.io/appscode/golang-dev:$(GO_VERSION) OUTBIN = bin/$(OS)_$(ARCH)/$(BIN) diff --git a/vendor/kmodules.xyz/client-go/api/v1/object.go b/vendor/kmodules.xyz/client-go/api/v1/object.go index 4e16224e..419d3cab 100644 --- a/vendor/kmodules.xyz/client-go/api/v1/object.go +++ b/vendor/kmodules.xyz/client-go/api/v1/object.go @@ -216,8 +216,8 @@ type ObjectInfo struct { Ref ObjectReference `json:"ref" protobuf:"bytes,2,opt,name=ref"` } -// +kubebuilder:validation:Enum=authn;authz;auth_secret;backup_via;catalog;cert_issuer;config;connect_via;exposed_by;event;located_on;monitored_by;ocm_bind;offshoot;ops;placed_into;policy;recommended_for;restore_into;scaled_by;storage;view -// ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) +// +kubebuilder:validation:Enum=authn;authz;auth_secret;backup_via;catalog;cert_issuer;config;connect_via;exposed_by;event;located_on;monitored_by;ocm_bind;offshoot;ops;placed_into;policy;recommended_for;restore_into;scaled_by;source;storage;view +// ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) type EdgeLabel string func (e EdgeLabel) Direct() bool { diff --git a/vendor/kmodules.xyz/client-go/api/v1/object_enum.go b/vendor/kmodules.xyz/client-go/api/v1/object_enum.go index 403d3aed..65792ec2 100644 --- a/vendor/kmodules.xyz/client-go/api/v1/object_enum.go +++ b/vendor/kmodules.xyz/client-go/api/v1/object_enum.go @@ -52,6 +52,8 @@ const ( EdgeLabelRestoreInto EdgeLabel = "restore_into" // EdgeLabelScaledBy is a EdgeLabel of type scaled_by. EdgeLabelScaledBy EdgeLabel = "scaled_by" + // EdgeLabelSource is a EdgeLabel of type source. 
+ EdgeLabelSource EdgeLabel = "source" // EdgeLabelStorage is a EdgeLabel of type storage. EdgeLabelStorage EdgeLabel = "storage" // EdgeLabelView is a EdgeLabel of type view. @@ -81,6 +83,7 @@ var _EdgeLabelNames = []string{ string(EdgeLabelRecommendedFor), string(EdgeLabelRestoreInto), string(EdgeLabelScaledBy), + string(EdgeLabelSource), string(EdgeLabelStorage), string(EdgeLabelView), } @@ -115,6 +118,7 @@ func EdgeLabelValues() []EdgeLabel { EdgeLabelRecommendedFor, EdgeLabelRestoreInto, EdgeLabelScaledBy, + EdgeLabelSource, EdgeLabelStorage, EdgeLabelView, } @@ -153,6 +157,7 @@ var _EdgeLabelValue = map[string]EdgeLabel{ "recommended_for": EdgeLabelRecommendedFor, "restore_into": EdgeLabelRestoreInto, "scaled_by": EdgeLabelScaledBy, + "source": EdgeLabelSource, "storage": EdgeLabelStorage, "view": EdgeLabelView, } diff --git a/vendor/kmodules.xyz/client-go/meta/encoding.go b/vendor/kmodules.xyz/client-go/meta/encoding.go index 18a889b3..abaa4f0e 100644 --- a/vendor/kmodules.xyz/client-go/meta/encoding.go +++ b/vendor/kmodules.xyz/client-go/meta/encoding.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +// nolint: goconst package meta import ( diff --git a/vendor/kmodules.xyz/client-go/meta/lib.go b/vendor/kmodules.xyz/client-go/meta/lib.go index 1b3a92b5..bf456b8f 100644 --- a/vendor/kmodules.xyz/client-go/meta/lib.go +++ b/vendor/kmodules.xyz/client-go/meta/lib.go @@ -210,3 +210,20 @@ func max(x, y int) int { } return y } + +func IsOfficialType(group string) bool { + switch { + case group == "": + return true + case !strings.ContainsRune(group, '.'): + return true + case group == "k8s.io" || strings.HasSuffix(group, ".k8s.io"): + return true + case group == "kubernetes.io" || strings.HasSuffix(group, ".kubernetes.io"): + return true + case group == "x-k8s.io" || strings.HasSuffix(group, ".x-k8s.io"): + return true + default: + return false + } +} diff --git a/vendor/kmodules.xyz/go-containerregistry/LICENSE b/vendor/kmodules.xyz/go-containerregistry/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/kmodules.xyz/go-containerregistry/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/kmodules.xyz/go-containerregistry/name/lib.go b/vendor/kmodules.xyz/go-containerregistry/name/lib.go new file mode 100644 index 00000000..334b5173 --- /dev/null +++ b/vendor/kmodules.xyz/go-containerregistry/name/lib.go @@ -0,0 +1,104 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package name + +import ( + "fmt" + "strings" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/crane" + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" +) + +type Image struct { + Original string + Name string + Registry string + Repository string + Tag string + Digest string +} + +func parseReference(s string, extractTag bool, opts ...name.Option) (*Image, error) { + ref, err := name.ParseReference(s, opts...) + if err != nil { + return nil, err + } + + var img Image + switch u := ref.(type) { + case name.Tag: + img.Registry = u.RegistryStr() + img.Repository = u.RepositoryStr() + img.Tag = u.TagStr() + if extractTag { + img.Digest, err = crane.Digest(s, crane.WithAuthFromKeychain(authn.DefaultKeychain)) + if err != nil { + return nil, err + } + } else { + img.Digest = "" + } + img.Name = u.Name() + img.Original = u.String() + case name.Digest: + img.Registry = u.RegistryStr() + img.Repository = u.RepositoryStr() + if tag, err := name.NewTag( + strings.Split(s, "@")[0], // skip the digest + append(opts, name.WithDefaultTag(""))..., // don't use defaultTag "latest" + ); err == nil { + img.Tag = tag.TagStr() + } + img.Digest = u.DigestStr() + img.Name = u.Name() + img.Original = u.String() + default: + return nil, fmt.Errorf("unknown image %T", ref) + } + return &img, nil +} + +func ParseReference(s string, opts ...name.Option) (*Image, error) { + return parseReference(s, false, opts...) +} + +func ParseOrExtractDigest(s string, opts ...name.Option) (*Image, error) { + return parseReference(s, true, opts...) +} + +func IsPrivateImage(img string) (bool, error) { + reference, err := name.ParseReference(img) + if err != nil { + return false, err + } + + _, err = remote.Get(reference, remote.WithAuthFromKeychain(authn.DefaultKeychain)) + if err == nil { + return false, nil + } + if strings.Contains(err.Error(), "UNAUTHORIZED") { + return true, nil + } + if strings.Contains(err.Error(), "MANIFEST_UNKNOWN") { // If the image is kind loaded (not available online) + return true, nil + } + + return true, err +} diff --git a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/clusterstatus_types.go b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/clusterstatus_types.go index 29a844e2..d7be93ae 100644 --- a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/clusterstatus_types.go +++ b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/clusterstatus_types.go @@ -28,6 +28,10 @@ const ( ResourceClusterStatuses = "clusterstatuses" ) +const ( + KeyACEManaged = "ace.appscode.com/managed" +) + // ClusterStatus is the Schema for any resource supported by resource-metrics library // +genclient diff --git a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/openapi_generated.go b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/openapi_generated.go index ef871224..ab84aebd 100644 --- a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/openapi_generated.go +++ b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/openapi_generated.go @@ -349,6 +349,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceBlockDefinitionList": schema_resource_metadata_apis_meta_v1alpha1_ResourceBlockDefinitionList(ref), "kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceBlockDefinitionSpec": schema_resource_metadata_apis_meta_v1alpha1_ResourceBlockDefinitionSpec(ref), 
"kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceCalculator": schema_resource_metadata_apis_meta_v1alpha1_ResourceCalculator(ref), + "kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceCalculatorRequest": schema_resource_metadata_apis_meta_v1alpha1_ResourceCalculatorRequest(ref), "kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceCalculatorResponse": schema_resource_metadata_apis_meta_v1alpha1_ResourceCalculatorResponse(ref), "kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceColumn": schema_resource_metadata_apis_meta_v1alpha1_ResourceColumn(ref), "kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceColumnDefinition": schema_resource_metadata_apis_meta_v1alpha1_ResourceColumnDefinition(ref), @@ -392,10 +393,17 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/resource-metadata/apis/shared.ActionInfo": schema_kmodulesxyz_resource_metadata_apis_shared_ActionInfo(ref), "kmodules.xyz/resource-metadata/apis/shared.ActionTemplate": schema_kmodulesxyz_resource_metadata_apis_shared_ActionTemplate(ref), "kmodules.xyz/resource-metadata/apis/shared.ActionTemplateGroup": schema_kmodulesxyz_resource_metadata_apis_shared_ActionTemplateGroup(ref), + "kmodules.xyz/resource-metadata/apis/shared.BootstrapPresets": schema_kmodulesxyz_resource_metadata_apis_shared_BootstrapPresets(ref), "kmodules.xyz/resource-metadata/apis/shared.Dashboard": schema_kmodulesxyz_resource_metadata_apis_shared_Dashboard(ref), "kmodules.xyz/resource-metadata/apis/shared.DashboardVar": schema_kmodulesxyz_resource_metadata_apis_shared_DashboardVar(ref), "kmodules.xyz/resource-metadata/apis/shared.DeploymentParameters": schema_kmodulesxyz_resource_metadata_apis_shared_DeploymentParameters(ref), + "kmodules.xyz/resource-metadata/apis/shared.HelmInfo": schema_kmodulesxyz_resource_metadata_apis_shared_HelmInfo(ref), + "kmodules.xyz/resource-metadata/apis/shared.HelmRelease": schema_kmodulesxyz_resource_metadata_apis_shared_HelmRelease(ref), + "kmodules.xyz/resource-metadata/apis/shared.HelmRepository": schema_kmodulesxyz_resource_metadata_apis_shared_HelmRepository(ref), "kmodules.xyz/resource-metadata/apis/shared.If": schema_kmodulesxyz_resource_metadata_apis_shared_If(ref), + "kmodules.xyz/resource-metadata/apis/shared.ImageRegistrySpec": schema_kmodulesxyz_resource_metadata_apis_shared_ImageRegistrySpec(ref), + "kmodules.xyz/resource-metadata/apis/shared.RegistryInfo": schema_kmodulesxyz_resource_metadata_apis_shared_RegistryInfo(ref), + "kmodules.xyz/resource-metadata/apis/shared.RegistryProxies": schema_kmodulesxyz_resource_metadata_apis_shared_RegistryProxies(ref), "kmodules.xyz/resource-metadata/apis/shared.ResourceLocator": schema_kmodulesxyz_resource_metadata_apis_shared_ResourceLocator(ref), "kmodules.xyz/resource-metadata/apis/shared.ResourceQuery": schema_kmodulesxyz_resource_metadata_apis_shared_ResourceQuery(ref), "kmodules.xyz/resource-metadata/apis/shared.SourceLocator": schema_kmodulesxyz_resource_metadata_apis_shared_SourceLocator(ref), @@ -16019,6 +16027,28 @@ func schema_resource_metadata_apis_meta_v1alpha1_PageBlockLayout(ref common.Refe Ref: ref("kmodules.xyz/resource-metadata/apis/meta/v1alpha1.PageBlockTableDefinition"), }, }, + "requiredFeatureSets": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: 
spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, }, Required: []string{"kind", "ref", "query"}, }, @@ -16101,6 +16131,28 @@ func schema_resource_metadata_apis_meta_v1alpha1_PageBlockOutline(ref common.Ref Ref: ref("kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceTableDefinitionRef"), }, }, + "requiredFeatureSets": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, }, Required: []string{"kind", "ref", "query"}, }, @@ -16825,7 +16877,7 @@ func schema_resource_metadata_apis_meta_v1alpha1_ResourceCalculator(ref common.R }, "request": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + Ref: ref("kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceCalculatorRequest"), }, }, "response": { @@ -16837,7 +16889,32 @@ func schema_resource_metadata_apis_meta_v1alpha1_ResourceCalculator(ref common.R }, }, Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension", "kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceCalculatorResponse"}, + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceCalculatorRequest", "kmodules.xyz/resource-metadata/apis/meta/v1alpha1.ResourceCalculatorResponse"}, + } +} + +func schema_resource_metadata_apis_meta_v1alpha1_ResourceCalculatorRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "resource": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + "edit": { + SchemaProps: spec.SchemaProps{ + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, } } @@ -19028,6 +19105,46 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_ActionTemplateGroup(ref co } } +func schema_kmodulesxyz_resource_metadata_apis_shared_BootstrapPresets(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "offlineInstaller": { + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.ImageRegistrySpec"), + }, + }, + "registry": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.RegistryInfo"), + }, + }, + "helm": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.HelmInfo"), + }, + }, + }, + Required: []string{"image", "registry", "helm"}, + }, + }, + Dependencies: []string{ + "kmodules.xyz/resource-metadata/apis/shared.HelmInfo", "kmodules.xyz/resource-metadata/apis/shared.ImageRegistrySpec", 
"kmodules.xyz/resource-metadata/apis/shared.RegistryInfo"}, + } +} + func schema_kmodulesxyz_resource_metadata_apis_shared_Dashboard(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19144,6 +19261,137 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_DeploymentParameters(ref c } } +func schema_kmodulesxyz_resource_metadata_apis_shared_HelmInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "repositories": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.HelmRepository"), + }, + }, + }, + }, + }, + "releases": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.HelmRelease"), + }, + }, + }, + }, + }, + }, + Required: []string{"repositories", "releases"}, + }, + }, + Dependencies: []string{ + "kmodules.xyz/resource-metadata/apis/shared.HelmRelease", "kmodules.xyz/resource-metadata/apis/shared.HelmRepository"}, + } +} + +func schema_kmodulesxyz_resource_metadata_apis_shared_HelmRelease(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "enabled": { + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "values": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + }, + Required: []string{"enabled", "version"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + } +} + +func schema_kmodulesxyz_resource_metadata_apis_shared_HelmRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "url": { + SchemaProps: spec.SchemaProps{ + Description: "URL of the Helm repository, a valid URL contains at least a protocol and host.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "secretName": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef specifies the Secret containing authentication credentials for the HelmRepository. For HTTP/S basic auth the secret must contain 'username' and 'password' fields. For TLS the secret must contain a 'certFile' and 'keyFile', and/or 'caFile' fields.", + Type: []string{"string"}, + Format: "", + }, + }, + "interval": { + SchemaProps: spec.SchemaProps{ + Description: "Interval at which to check the URL for updates.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "timeout": { + SchemaProps: spec.SchemaProps{ + Description: "The timeout of index downloading, defaults to 60s.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of the HelmRepository. 
When this field is set to \"oci\", the URL field value must be prefixed with \"oci://\".", + Type: []string{"string"}, + Format: "", + }, + }, + "provider": { + SchemaProps: spec.SchemaProps{ + Description: "Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. This field is optional, and only taken into account if the .spec.type field is set to 'oci'. When not specified, defaults to 'generic'.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"url"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + func schema_kmodulesxyz_resource_metadata_apis_shared_If(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -19169,6 +19417,113 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_If(ref common.ReferenceCal } } +func schema_kmodulesxyz_resource_metadata_apis_shared_ImageRegistrySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "proxies": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.RegistryProxies"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "kmodules.xyz/resource-metadata/apis/shared.RegistryProxies"}, + } +} + +func schema_kmodulesxyz_resource_metadata_apis_shared_RegistryInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "credentials": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_kmodulesxyz_resource_metadata_apis_shared_RegistryProxies(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "dockerHub": { + SchemaProps: spec.SchemaProps{ + Description: "company/bin:1.23", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "dockerLibrary": { + SchemaProps: spec.SchemaProps{ + Description: "alpine, nginx etc.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "ghcr": { + SchemaProps: spec.SchemaProps{ + Description: "ghcr.io", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "quay": { + SchemaProps: spec.SchemaProps{ + Description: "quay.io", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kubernetes": { + SchemaProps: spec.SchemaProps{ + Description: "registry.k8s.io", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "appscode": { + SchemaProps: spec.SchemaProps{ + Description: "r.appscode.com", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_kmodulesxyz_resource_metadata_apis_shared_ResourceLocator(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcecalculator_types.go 
b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcecalculator_types.go index 71d83901..1016d0fc 100644 --- a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcecalculator_types.go +++ b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcecalculator_types.go @@ -42,11 +42,16 @@ type ResourceCalculator struct { metav1.ObjectMeta `json:"metadata,omitempty"` // +kubebuilder:pruning:PreserveUnknownFields - Request *runtime.RawExtension `json:"request,omitempty"` + Request *ResourceCalculatorRequest `json:"request,omitempty"` // +optional Response *ResourceCalculatorResponse `json:"response,omitempty"` } +type ResourceCalculatorRequest struct { + Resource *runtime.RawExtension `json:"resource,omitempty"` + Edit bool `json:"edit,omitempty"` +} + type ResourceCalculatorResponse struct { APIType kmapi.ResourceID `json:"apiType"` // +optional diff --git a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcedescriptor_helpers.go b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcedescriptor_helpers.go index 457106dd..927893ef 100644 --- a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcedescriptor_helpers.go +++ b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcedescriptor_helpers.go @@ -17,8 +17,6 @@ limitations under the License. package v1alpha1 import ( - "strings" - kmapi "kmodules.xyz/client-go/api/v1" "kmodules.xyz/client-go/apiextensions" "kmodules.xyz/resource-metadata/crds" @@ -59,20 +57,3 @@ func (rd ResourceDescriptor) ToYAML() ([]byte, error) { return FormatMetadata(data) } - -func IsOfficialType(group string) bool { - switch { - case group == "": - return true - case !strings.ContainsRune(group, '.'): - return true - case group == "k8s.io" || strings.HasSuffix(group, ".k8s.io"): - return true - case group == "kubernetes.io" || strings.HasSuffix(group, ".kubernetes.io"): - return true - case group == "x-k8s.io" || strings.HasSuffix(group, ".x-k8s.io"): - return true - default: - return false - } -} diff --git a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcelayout_types.go b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcelayout_types.go index f05f248a..47e70d4d 100644 --- a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcelayout_types.go +++ b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourcelayout_types.go @@ -78,8 +78,12 @@ type PageBlockLayout struct { Actions *ResourceActions `json:"actions,omitempty"` View *PageBlockTableDefinition `json:"view,omitempty"` + + RequiredFeatureSets map[string]FeatureList `json:"requiredFeatureSets,omitempty"` } +type FeatureList []string + type PageBlockTableDefinition struct { Columns []ResourceColumnDefinition `json:"columns,omitempty"` } diff --git a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourceoutline_types.go b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourceoutline_types.go index 425ba027..568d877b 100644 --- a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourceoutline_types.go +++ b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/resourceoutline_types.go @@ -88,6 +88,7 @@ type PageBlockOutline struct { DisplayMode ResourceDisplayMode `json:"displayMode,omitempty"` Actions *ResourceActions `json:"actions,omitempty"` View *ResourceTableDefinitionRef `json:"view,omitempty"` + RequiredFeatureSets map[string]FeatureList `json:"requiredFeatureSets,omitempty"` } type ResourceTableDefinitionRef struct { diff --git 
a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/zz_generated.deepcopy.go b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/zz_generated.deepcopy.go index 18bbd650..0c3c155b 100644 --- a/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/kmodules.xyz/resource-metadata/apis/meta/v1alpha1/zz_generated.deepcopy.go @@ -277,6 +277,26 @@ func (in *ExecResult) DeepCopy() *ExecResult { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in FeatureList) DeepCopyInto(out *FeatureList) { + { + in := &in + *out = make(FeatureList, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureList. +func (in FeatureList) DeepCopy() FeatureList { + if in == nil { + return nil + } + out := new(FeatureList) + in.DeepCopyInto(out) + return *out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Link) DeepCopyInto(out *Link) { *out = *in @@ -768,6 +788,21 @@ func (in *PageBlockLayout) DeepCopyInto(out *PageBlockLayout) { *out = new(PageBlockTableDefinition) (*in).DeepCopyInto(*out) } + if in.RequiredFeatureSets != nil { + in, out := &in.RequiredFeatureSets, &out.RequiredFeatureSets + *out = make(map[string]FeatureList, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(FeatureList, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } return } @@ -804,6 +839,21 @@ func (in *PageBlockOutline) DeepCopyInto(out *PageBlockOutline) { *out = new(ResourceTableDefinitionRef) (*in).DeepCopyInto(*out) } + if in.RequiredFeatureSets != nil { + in, out := &in.RequiredFeatureSets, &out.RequiredFeatureSets + *out = make(map[string]FeatureList, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(FeatureList, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } return } @@ -1332,7 +1382,7 @@ func (in *ResourceCalculator) DeepCopyInto(out *ResourceCalculator) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Request != nil { in, out := &in.Request, &out.Request - *out = new(runtime.RawExtension) + *out = new(ResourceCalculatorRequest) (*in).DeepCopyInto(*out) } if in.Response != nil { @@ -1361,6 +1411,27 @@ func (in *ResourceCalculator) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceCalculatorRequest) DeepCopyInto(out *ResourceCalculatorRequest) { + *out = *in + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceCalculatorRequest. +func (in *ResourceCalculatorRequest) DeepCopy() *ResourceCalculatorRequest { + if in == nil { + return nil + } + out := new(ResourceCalculatorRequest) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
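The request field of ResourceCalculator changed above from a bare RawExtension to the structured ResourceCalculatorRequest. A minimal sketch of building the new request shape; the raw payload is a placeholder, and the comment on Edit is an assumption based on the field name:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"kmodules.xyz/resource-metadata/apis/meta/v1alpha1"
)

func main() {
	raw := []byte(`{"apiVersion":"apps/v1","kind":"Deployment"}`) // placeholder object

	calc := v1alpha1.ResourceCalculator{
		Request: &v1alpha1.ResourceCalculatorRequest{
			Resource: &runtime.RawExtension{Raw: raw},
			Edit:     true, // flags the payload as an edit of an existing object
		},
	}
	fmt.Println(calc.Request.Edit)
}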
func (in *ResourceCalculatorResponse) DeepCopyInto(out *ResourceCalculatorResponse) { *out = *in diff --git a/vendor/kmodules.xyz/resource-metadata/apis/shared/helm.go b/vendor/kmodules.xyz/resource-metadata/apis/shared/helm.go new file mode 100644 index 00000000..e24be4b7 --- /dev/null +++ b/vendor/kmodules.xyz/resource-metadata/apis/shared/helm.go @@ -0,0 +1,112 @@ +/* +Copyright AppsCode Inc. and Contributors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package shared + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +type BootstrapPresets struct { + // +optional + OfflineInstaller bool `json:"offlineInstaller"` + Image ImageRegistrySpec `json:"image"` + Registry RegistryInfo `json:"registry"` + Helm HelmInfo `json:"helm"` +} + +type ImageRegistrySpec struct { + //+optional + Proxies RegistryProxies `json:"proxies"` +} + +type RegistryProxies struct { + // company/bin:1.23 + //+optional + DockerHub string `json:"dockerHub"` + // alpine, nginx etc. + //+optional + DockerLibrary string `json:"dockerLibrary"` + // ghcr.io + //+optional + GHCR string `json:"ghcr"` + // quay.io + //+optional + Quay string `json:"quay"` + // registry.k8s.io + //+optional + Kubernetes string `json:"kubernetes"` + // r.appscode.com + //+optional + AppsCode string `json:"appscode"` +} + +type RegistryInfo struct { + //+optional + Credentials map[string]string `json:"credentials"` +} + +type HelmInfo struct { + Repositories map[string]*HelmRepository `json:"repositories"` + Releases map[string]*HelmRelease `json:"releases"` +} + +type HelmRepository struct { + // URL of the Helm repository, a valid URL contains at least a protocol and + // host. + // +required + URL string `json:"url"` + + // SecretRef specifies the Secret containing authentication credentials + // for the HelmRepository. + // For HTTP/S basic auth the secret must contain 'username' and 'password' + // fields. + // For TLS the secret must contain a 'certFile' and 'keyFile', and/or + // 'caFile' fields. + // +optional + SecretName string `json:"secretName,omitempty"` + + // Interval at which to check the URL for updates. + // +kubebuilder:validation:Type=string + // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$" + // +optional + Interval *metav1.Duration `json:"interval,omitempty"` + + // The timeout of index downloading, defaults to 60s. + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // Type of the HelmRepository. + // When this field is set to "oci", the URL field value must be prefixed with "oci://". + // +kubebuilder:validation:Enum=default;oci + // +optional + Type string `json:"type,omitempty"` + + // Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. + // This field is optional, and only taken into account if the .spec.type field is set to 'oci'. + // When not specified, defaults to 'generic'. 
+ // +kubebuilder:validation:Enum=generic;aws;azure;gcp + // +kubebuilder:default:=generic + // +optional + Provider string `json:"provider,omitempty"` +} + +type HelmRelease struct { + Enabled bool `json:"enabled"` + Version string `json:"version"` + Values *runtime.RawExtension `json:"values,omitempty"` +} diff --git a/vendor/kmodules.xyz/resource-metadata/apis/shared/helpers.go b/vendor/kmodules.xyz/resource-metadata/apis/shared/helpers.go index 41d35a84..6a8475b5 100644 --- a/vendor/kmodules.xyz/resource-metadata/apis/shared/helpers.go +++ b/vendor/kmodules.xyz/resource-metadata/apis/shared/helpers.go @@ -24,6 +24,7 @@ import ( "text/template" kmapi "kmodules.xyz/client-go/api/v1" + "kmodules.xyz/go-containerregistry/name" "github.com/Masterminds/sprig/v3" "github.com/pkg/errors" @@ -91,3 +92,116 @@ func (r ResourceLocator) GraphQuery(oid kmapi.OID) (string, map[string]interface } return "", nil, fmt.Errorf("unknown query type %+v, oid %s", r, oid) } + +func (r ImageRegistrySpec) DockerHubProxy() string { + addr := r.Proxies.DockerHub + addr = strings.TrimSpace(addr) + addr = strings.TrimSuffix(addr, "/") + return addr +} + +func (r ImageRegistrySpec) DockerLibraryProxy() string { + addr := r.Proxies.DockerLibrary + if addr == "" { + addr = r.Proxies.DockerHub + } + addr = strings.TrimSpace(addr) + addr = strings.TrimSuffix(addr, "/") + return addr +} + +func (r ImageRegistrySpec) GHCRProxy() string { + addr := r.Proxies.GHCR + addr = strings.TrimSpace(addr) + addr = strings.TrimSuffix(addr, "/") + return addr +} + +func (r ImageRegistrySpec) QuayProxy() string { + addr := r.Proxies.Quay + addr = strings.TrimSpace(addr) + addr = strings.TrimSuffix(addr, "/") + return addr +} + +func (r ImageRegistrySpec) KubernetesRegistryProxy() string { + addr := r.Proxies.Kubernetes + addr = strings.TrimSpace(addr) + addr = strings.TrimSuffix(addr, "/") + return addr +} + +func (r ImageRegistrySpec) AppsCodeRegistryProxy() string { + addr := r.Proxies.AppsCode + addr = strings.TrimSpace(addr) + addr = strings.TrimSuffix(addr, "/") + return addr +} + +const defaultTag = "latest" + +func NewRef(spec ImageRegistrySpec, img string) (string, error) { + ref, err := name.ParseReference(img) + if err != nil { + return "", err + } + + // https://github.com/kmodules/go-containerregistry/blob/master/name/lib_test.go + switch ref.Registry { + case "index.docker.io": + var result string + _, bin, found := strings.Cut(ref.Repository, "library/") + if found { + addr := spec.DockerLibraryProxy() + if addr != "" { + result = addr + "/" + bin + } else { + result = bin + } + } else { + addr := spec.DockerHubProxy() + if addr != "" { + result = addr + "/" + ref.Repository + } else { + result = ref.Repository + } + } + if ref.Tag != "" && ref.Tag != defaultTag { + result += ":" + ref.Tag + } + return result, nil + case "ghcr.io": + result := spec.GHCRProxy() + "/" + ref.Repository + if ref.Tag != "" && ref.Tag != defaultTag { + result += ":" + ref.Tag + } + return result, nil + case "quay.io": + result := spec.QuayProxy() + "/" + ref.Repository + if ref.Tag != "" && ref.Tag != defaultTag { + result += ":" + ref.Tag + } + return result, nil + case "registry.k8s.io": + result := spec.KubernetesRegistryProxy() + "/" + ref.Repository + if ref.Tag != "" && ref.Tag != defaultTag { + result += ":" + ref.Tag + } + return result, nil + case "r.appscode.com": + result := spec.AppsCodeRegistryProxy() + "/" + ref.Repository + if ref.Tag != "" && ref.Tag != defaultTag { + result += ":" + ref.Tag + } + return result, nil + } + return 
"", fmt.Errorf("registry not support for image %s", img) +} + +func MustNewRef(spec ImageRegistrySpec, img string) string { + out, err := NewRef(spec, img) + if err != nil { + panic(err) + } + return out +} diff --git a/vendor/kmodules.xyz/resource-metadata/apis/shared/openapi_generated.go b/vendor/kmodules.xyz/resource-metadata/apis/shared/openapi_generated.go index 212960e9..44748728 100644 --- a/vendor/kmodules.xyz/resource-metadata/apis/shared/openapi_generated.go +++ b/vendor/kmodules.xyz/resource-metadata/apis/shared/openapi_generated.go @@ -35,10 +35,17 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "kmodules.xyz/resource-metadata/apis/shared.ActionInfo": schema_kmodulesxyz_resource_metadata_apis_shared_ActionInfo(ref), "kmodules.xyz/resource-metadata/apis/shared.ActionTemplate": schema_kmodulesxyz_resource_metadata_apis_shared_ActionTemplate(ref), "kmodules.xyz/resource-metadata/apis/shared.ActionTemplateGroup": schema_kmodulesxyz_resource_metadata_apis_shared_ActionTemplateGroup(ref), + "kmodules.xyz/resource-metadata/apis/shared.BootstrapPresets": schema_kmodulesxyz_resource_metadata_apis_shared_BootstrapPresets(ref), "kmodules.xyz/resource-metadata/apis/shared.Dashboard": schema_kmodulesxyz_resource_metadata_apis_shared_Dashboard(ref), "kmodules.xyz/resource-metadata/apis/shared.DashboardVar": schema_kmodulesxyz_resource_metadata_apis_shared_DashboardVar(ref), "kmodules.xyz/resource-metadata/apis/shared.DeploymentParameters": schema_kmodulesxyz_resource_metadata_apis_shared_DeploymentParameters(ref), + "kmodules.xyz/resource-metadata/apis/shared.HelmInfo": schema_kmodulesxyz_resource_metadata_apis_shared_HelmInfo(ref), + "kmodules.xyz/resource-metadata/apis/shared.HelmRelease": schema_kmodulesxyz_resource_metadata_apis_shared_HelmRelease(ref), + "kmodules.xyz/resource-metadata/apis/shared.HelmRepository": schema_kmodulesxyz_resource_metadata_apis_shared_HelmRepository(ref), "kmodules.xyz/resource-metadata/apis/shared.If": schema_kmodulesxyz_resource_metadata_apis_shared_If(ref), + "kmodules.xyz/resource-metadata/apis/shared.ImageRegistrySpec": schema_kmodulesxyz_resource_metadata_apis_shared_ImageRegistrySpec(ref), + "kmodules.xyz/resource-metadata/apis/shared.RegistryInfo": schema_kmodulesxyz_resource_metadata_apis_shared_RegistryInfo(ref), + "kmodules.xyz/resource-metadata/apis/shared.RegistryProxies": schema_kmodulesxyz_resource_metadata_apis_shared_RegistryProxies(ref), "kmodules.xyz/resource-metadata/apis/shared.ResourceLocator": schema_kmodulesxyz_resource_metadata_apis_shared_ResourceLocator(ref), "kmodules.xyz/resource-metadata/apis/shared.ResourceQuery": schema_kmodulesxyz_resource_metadata_apis_shared_ResourceQuery(ref), "kmodules.xyz/resource-metadata/apis/shared.SourceLocator": schema_kmodulesxyz_resource_metadata_apis_shared_SourceLocator(ref), @@ -289,6 +296,46 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_ActionTemplateGroup(ref co } } +func schema_kmodulesxyz_resource_metadata_apis_shared_BootstrapPresets(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "offlineInstaller": { + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.ImageRegistrySpec"), + }, + }, + 
"registry": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.RegistryInfo"), + }, + }, + "helm": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.HelmInfo"), + }, + }, + }, + Required: []string{"image", "registry", "helm"}, + }, + }, + Dependencies: []string{ + "kmodules.xyz/resource-metadata/apis/shared.HelmInfo", "kmodules.xyz/resource-metadata/apis/shared.ImageRegistrySpec", "kmodules.xyz/resource-metadata/apis/shared.RegistryInfo"}, + } +} + func schema_kmodulesxyz_resource_metadata_apis_shared_Dashboard(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -405,6 +452,137 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_DeploymentParameters(ref c } } +func schema_kmodulesxyz_resource_metadata_apis_shared_HelmInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "repositories": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.HelmRepository"), + }, + }, + }, + }, + }, + "releases": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.HelmRelease"), + }, + }, + }, + }, + }, + }, + Required: []string{"repositories", "releases"}, + }, + }, + Dependencies: []string{ + "kmodules.xyz/resource-metadata/apis/shared.HelmRelease", "kmodules.xyz/resource-metadata/apis/shared.HelmRepository"}, + } +} + +func schema_kmodulesxyz_resource_metadata_apis_shared_HelmRelease(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "enabled": { + SchemaProps: spec.SchemaProps{ + Default: false, + Type: []string{"boolean"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "values": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/runtime.RawExtension"), + }, + }, + }, + Required: []string{"enabled", "version"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/runtime.RawExtension"}, + } +} + +func schema_kmodulesxyz_resource_metadata_apis_shared_HelmRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "url": { + SchemaProps: spec.SchemaProps{ + Description: "URL of the Helm repository, a valid URL contains at least a protocol and host.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "secretName": { + SchemaProps: spec.SchemaProps{ + Description: "SecretRef specifies the Secret containing authentication credentials for the HelmRepository. For HTTP/S basic auth the secret must contain 'username' and 'password' fields. 
For TLS the secret must contain a 'certFile' and 'keyFile', and/or 'caFile' fields.", + Type: []string{"string"}, + Format: "", + }, + }, + "interval": { + SchemaProps: spec.SchemaProps{ + Description: "Interval at which to check the URL for updates.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "timeout": { + SchemaProps: spec.SchemaProps{ + Description: "The timeout of index downloading, defaults to 60s.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of the HelmRepository. When this field is set to \"oci\", the URL field value must be prefixed with \"oci://\".", + Type: []string{"string"}, + Format: "", + }, + }, + "provider": { + SchemaProps: spec.SchemaProps{ + Description: "Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'. This field is optional, and only taken into account if the .spec.type field is set to 'oci'. When not specified, defaults to 'generic'.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"url"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + func schema_kmodulesxyz_resource_metadata_apis_shared_If(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -430,6 +608,113 @@ func schema_kmodulesxyz_resource_metadata_apis_shared_If(ref common.ReferenceCal } } +func schema_kmodulesxyz_resource_metadata_apis_shared_ImageRegistrySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "proxies": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("kmodules.xyz/resource-metadata/apis/shared.RegistryProxies"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "kmodules.xyz/resource-metadata/apis/shared.RegistryProxies"}, + } +} + +func schema_kmodulesxyz_resource_metadata_apis_shared_RegistryInfo(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "credentials": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func schema_kmodulesxyz_resource_metadata_apis_shared_RegistryProxies(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "dockerHub": { + SchemaProps: spec.SchemaProps{ + Description: "company/bin:1.23", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "dockerLibrary": { + SchemaProps: spec.SchemaProps{ + Description: "alpine, nginx etc.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "ghcr": { + SchemaProps: spec.SchemaProps{ + Description: "ghcr.io", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "quay": { + SchemaProps: spec.SchemaProps{ + Description: "quay.io", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "kubernetes": { + SchemaProps: 
spec.SchemaProps{ + Description: "registry.k8s.io", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "appscode": { + SchemaProps: spec.SchemaProps{ + Description: "r.appscode.com", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_kmodulesxyz_resource_metadata_apis_shared_ResourceLocator(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/kmodules.xyz/resource-metadata/apis/shared/zz_generated.deepcopy.go b/vendor/kmodules.xyz/resource-metadata/apis/shared/zz_generated.deepcopy.go index f7f82bd5..39939a07 100644 --- a/vendor/kmodules.xyz/resource-metadata/apis/shared/zz_generated.deepcopy.go +++ b/vendor/kmodules.xyz/resource-metadata/apis/shared/zz_generated.deepcopy.go @@ -22,6 +22,8 @@ limitations under the License. package shared import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" v1alpha1 "x-helm.dev/apimachinery/apis/releases/v1alpha1" apisshared "x-helm.dev/apimachinery/apis/shared" ) @@ -144,6 +146,25 @@ func (in *ActionTemplateGroup) DeepCopy() *ActionTemplateGroup { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BootstrapPresets) DeepCopyInto(out *BootstrapPresets) { + *out = *in + out.Image = in.Image + in.Registry.DeepCopyInto(&out.Registry) + in.Helm.DeepCopyInto(&out.Helm) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootstrapPresets. +func (in *BootstrapPresets) DeepCopy() *BootstrapPresets { + if in == nil { + return nil + } + out := new(BootstrapPresets) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Dashboard) DeepCopyInto(out *Dashboard) { *out = *in @@ -212,6 +233,99 @@ func (in *DeploymentParameters) DeepCopy() *DeploymentParameters { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmInfo) DeepCopyInto(out *HelmInfo) { + *out = *in + if in.Repositories != nil { + in, out := &in.Repositories, &out.Repositories + *out = make(map[string]*HelmRepository, len(*in)) + for key, val := range *in { + var outVal *HelmRepository + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(HelmRepository) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + if in.Releases != nil { + in, out := &in.Releases, &out.Releases + *out = make(map[string]*HelmRelease, len(*in)) + for key, val := range *in { + var outVal *HelmRelease + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(HelmRelease) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmInfo. +func (in *HelmInfo) DeepCopy() *HelmInfo { + if in == nil { + return nil + } + out := new(HelmInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
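The Helm bootstrap types whose deepcopy functions appear here are plain data carriers. A hedged sketch of a BootstrapPresets value for an offline install; the repository URL, release name, and version are hypothetical:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"kmodules.xyz/resource-metadata/apis/shared"
)

func main() {
	presets := shared.BootstrapPresets{
		OfflineInstaller: true,
		Helm: shared.HelmInfo{
			Repositories: map[string]*shared.HelmRepository{
				"appscode-oci": {
					URL:  "oci://harbor.example.com/appscode-charts", // type "oci" requires an oci:// URL
					Type: "oci",
				},
			},
			Releases: map[string]*shared.HelmRelease{
				"kubedb": {
					Enabled: true,
					Version: "v2023.6.1", // hypothetical pinned chart version
					Values:  &runtime.RawExtension{Raw: []byte(`{"registry":"harbor.example.com"}`)},
				},
			},
		},
	}
	fmt.Println(presets.Helm.Releases["kubedb"].Enabled)
}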
+func (in *HelmRelease) DeepCopyInto(out *HelmRelease) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = new(runtime.RawExtension) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRelease. +func (in *HelmRelease) DeepCopy() *HelmRelease { + if in == nil { + return nil + } + out := new(HelmRelease) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRepository) DeepCopyInto(out *HelmRepository) { + *out = *in + if in.Interval != nil { + in, out := &in.Interval, &out.Interval + *out = new(v1.Duration) + **out = **in + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository. +func (in *HelmRepository) DeepCopy() *HelmRepository { + if in == nil { + return nil + } + out := new(HelmRepository) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *If) DeepCopyInto(out *If) { *out = *in @@ -233,6 +347,62 @@ func (in *If) DeepCopy() *If { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistrySpec) DeepCopyInto(out *ImageRegistrySpec) { + *out = *in + out.Proxies = in.Proxies + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistrySpec. +func (in *ImageRegistrySpec) DeepCopy() *ImageRegistrySpec { + if in == nil { + return nil + } + out := new(ImageRegistrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryInfo) DeepCopyInto(out *RegistryInfo) { + *out = *in + if in.Credentials != nil { + in, out := &in.Credentials, &out.Credentials + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryInfo. +func (in *RegistryInfo) DeepCopy() *RegistryInfo { + if in == nil { + return nil + } + out := new(RegistryInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryProxies) DeepCopyInto(out *RegistryProxies) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryProxies. +func (in *RegistryProxies) DeepCopy() *RegistryProxies { + if in == nil { + return nil + } + out := new(RegistryProxies) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
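RegistryProxies, deep-copied just above, is what drives the NewRef/MustNewRef rewriting added in apis/shared/helpers.go. A sketch under a hypothetical in-house proxy host; note that Docker Hub library images drop the implicit library/ prefix, and the default latest tag is omitted from the result:

package main

import (
	"fmt"

	"kmodules.xyz/resource-metadata/apis/shared"
)

func main() {
	spec := shared.ImageRegistrySpec{
		Proxies: shared.RegistryProxies{
			DockerLibrary: "harbor.example.com/library", // hypothetical proxy endpoints
			GHCR:          "harbor.example.com/ghcr",
		},
	}

	fmt.Println(shared.MustNewRef(spec, "nginx"))
	// harbor.example.com/library/nginx

	fmt.Println(shared.MustNewRef(spec, "ghcr.io/appscode/cluster-ui:v0.4.16"))
	// harbor.example.com/ghcr/appscode/cluster-ui:v0.4.16

	// Registries outside the supported set make NewRef return an error;
	// MustNewRef panics in that case.
	if _, err := shared.NewRef(spec, "gcr.io/google-containers/pause"); err != nil {
		fmt.Println(err)
	}
}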
func (in *ResourceLocator) DeepCopyInto(out *ResourceLocator) { *out = *in diff --git a/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourceblockdefinitions.yaml b/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourceblockdefinitions.yaml index d72ba010..74d388f1 100644 --- a/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourceblockdefinitions.yaml +++ b/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourceblockdefinitions.yaml @@ -84,7 +84,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -106,6 +106,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -132,6 +133,12 @@ spec: - group - kind type: object + requiredFeatureSets: + additionalProperties: + items: + type: string + type: array + type: object view: properties: columns: diff --git a/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcecalculators.yaml b/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcecalculators.yaml index 2a796bd9..a6d14ba8 100644 --- a/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcecalculators.yaml +++ b/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcecalculators.yaml @@ -31,6 +31,12 @@ spec: metadata: type: object request: + properties: + edit: + type: boolean + resource: + type: object + x-kubernetes-preserve-unknown-fields: true type: object x-kubernetes-preserve-unknown-fields: true response: diff --git a/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcedescriptors.yaml b/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcedescriptors.yaml index b606bd11..19d0917b 100644 --- a/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcedescriptors.yaml +++ b/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcedescriptors.yaml @@ -46,7 +46,7 @@ spec: properties: labels: items: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -68,6 +68,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -205,7 +206,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: 
ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -227,6 +228,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string diff --git a/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcelayouts.yaml b/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcelayouts.yaml index f9be1fb5..57f05b90 100644 --- a/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcelayouts.yaml +++ b/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourcelayouts.yaml @@ -85,7 +85,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -107,6 +107,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -133,6 +134,12 @@ spec: - group - kind type: object + requiredFeatureSets: + additionalProperties: + items: + type: string + type: array + type: object view: properties: columns: @@ -322,7 +329,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -344,6 +351,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -371,6 +379,12 @@ spec: - group - kind type: object + requiredFeatureSets: + additionalProperties: + items: + type: string + type: array + type: object view: properties: columns: @@ -585,7 +599,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -607,6 +621,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -634,6 +649,12 @@ spec: - group - kind type: object + requiredFeatureSets: + additionalProperties: + items: + type: string + type: array + type: object view: properties: columns: @@ -821,7 +842,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + 
description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -843,6 +864,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -870,6 +892,12 @@ spec: - group - kind type: object + requiredFeatureSets: + additionalProperties: + items: + type: string + type: array + type: object view: properties: columns: @@ -1087,7 +1115,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -1109,6 +1137,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -1135,6 +1164,12 @@ spec: - group - kind type: object + requiredFeatureSets: + additionalProperties: + items: + type: string + type: array + type: object view: properties: columns: diff --git a/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourceoutlines.yaml b/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourceoutlines.yaml index 89901379..7757a247 100644 --- a/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourceoutlines.yaml +++ b/vendor/kmodules.xyz/resource-metadata/crds/meta.k8s.appscode.com_resourceoutlines.yaml @@ -172,7 +172,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -194,6 +194,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -220,6 +221,12 @@ spec: - group - kind type: object + requiredFeatureSets: + additionalProperties: + items: + type: string + type: array + type: object view: properties: columns: @@ -411,7 +418,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -433,6 +440,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -460,6 +468,12 @@ spec: - group - kind type: object + requiredFeatureSets: + additionalProperties: + items: + type: string + type: array + type: object view: properties: columns: @@ -676,7 +690,7 @@ spec: query: properties: byLabel: - description: 
ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -698,6 +712,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -725,6 +740,12 @@ spec: - group - kind type: object + requiredFeatureSets: + additionalProperties: + items: + type: string + type: array + type: object view: properties: columns: @@ -914,7 +935,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -936,6 +957,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -963,6 +985,12 @@ spec: - group - kind type: object + requiredFeatureSets: + additionalProperties: + items: + type: string + type: array + type: object view: properties: columns: @@ -1182,7 +1210,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -1204,6 +1232,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string @@ -1230,6 +1259,12 @@ spec: - group - kind type: object + requiredFeatureSets: + additionalProperties: + items: + type: string + type: array + type: object view: properties: columns: diff --git a/vendor/kmodules.xyz/resource-metadata/crds/node.k8s.appscode.com_nodetopologies.yaml b/vendor/kmodules.xyz/resource-metadata/crds/node.k8s.appscode.com_nodetopologies.yaml new file mode 100644 index 00000000..d6ef9809 --- /dev/null +++ b/vendor/kmodules.xyz/resource-metadata/crds/node.k8s.appscode.com_nodetopologies.yaml @@ -0,0 +1,77 @@ + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: nodetopologies.node.k8s.appscode.com +spec: + group: node.k8s.appscode.com + names: + kind: NodeTopology + listKind: NodeTopologyList + plural: nodetopologies + singular: nodetopology + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + nodeGroups: + items: + properties: + allocatable: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Allocatable represents the total resources of + a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity' + type: object + topologyValue: + type: string + required: + - allocatable + - topologyValue + type: object + type: array + nodeSelectionPolicy: + enum: + - LabelSelector + - Taint + type: string + topologyKey: + type: string + required: + - nodeSelectionPolicy + - topologyKey + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/kmodules.xyz/resource-metadata/crds/ui.k8s.appscode.com_resourcedashboards.yaml b/vendor/kmodules.xyz/resource-metadata/crds/ui.k8s.appscode.com_resourcedashboards.yaml index 724bc221..745fde73 100644 --- a/vendor/kmodules.xyz/resource-metadata/crds/ui.k8s.appscode.com_resourcedashboards.yaml +++ b/vendor/kmodules.xyz/resource-metadata/crds/ui.k8s.appscode.com_resourcedashboards.yaml @@ -51,7 +51,7 @@ spec: query: properties: byLabel: - description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,storage,view) + description: ENUM(authn,authz,auth_secret,backup_via,catalog,cert_issuer,config,connect_via,exposed_by,event,located_on,monitored_by,ocm_bind,offshoot,ops,placed_into,policy,recommended_for,restore_into,scaled_by,source,storage,view) enum: - authn - authz @@ -73,6 +73,7 @@ spec: - recommended_for - restore_into - scaled_by + - source - storage - view type: string diff --git a/vendor/kmodules.xyz/resource-metrics/api/utils.go b/vendor/kmodules.xyz/resource-metrics/api/utils.go index 015f8f54..f16f962f 100644 --- a/vendor/kmodules.xyz/resource-metrics/api/utils.go +++ b/vendor/kmodules.xyz/resource-metrics/api/utils.go @@ -85,6 +85,33 @@ func AddResourceList(x, y core.ResourceList) core.ResourceList { return result } +func SubtractResourceList(x, y core.ResourceList) core.ResourceList { + names := sets.NewString() + for k := range x { + names.Insert(string(k)) + } + for k := range y { + names.Insert(string(k)) + } + + result := core.ResourceList{} + for _, fullName := range names.UnsortedList() { + _, name, found := strings.Cut(fullName, ".") + var rf resource.Format + if found { + rf = resourceFormat(core.ResourceName(name)) + } else { + rf = resourceFormat(core.ResourceName(fullName)) + } + + sum := resource.Quantity{Format: rf} + sum.Add(*x.Name(core.ResourceName(fullName), rf)) + sum.Sub(*y.Name(core.ResourceName(fullName), rf)) + result[core.ResourceName(fullName)] = sum + } + return result +} + func 
resourceFormat(name core.ResourceName) resource.Format { switch name { case core.ResourceCPU: diff --git a/vendor/modules.txt b/vendor/modules.txt index b9f8ff25..880da8b6 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,7 +1,7 @@ -# cloud.google.com/go/compute v1.13.0 +# cloud.google.com/go/compute v1.21.0 ## explicit; go 1.19 cloud.google.com/go/compute/internal -# cloud.google.com/go/compute/metadata v0.2.2 +# cloud.google.com/go/compute/metadata v0.2.3 ## explicit; go 1.19 cloud.google.com/go/compute/metadata # github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0 @@ -119,6 +119,10 @@ github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/containerd/console v1.0.3 ## explicit; go 1.13 github.com/containerd/console +# github.com/containerd/stargz-snapshotter/estargz v0.12.1 +## explicit; go 1.16 +github.com/containerd/stargz-snapshotter/estargz +github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/creack/goselect v0.1.2 ## explicit; go 1.13 github.com/creack/goselect @@ -131,9 +135,23 @@ github.com/digitalocean/godo github.com/digitalocean/godo/metrics # github.com/dnaeon/go-vcr v1.2.0 ## explicit; go 1.15 +# github.com/docker/cli v20.10.22+incompatible +## explicit +github.com/docker/cli/cli/config +github.com/docker/cli/cli/config/configfile +github.com/docker/cli/cli/config/credentials +github.com/docker/cli/cli/config/types +# github.com/docker/distribution v2.8.1+incompatible +## explicit +github.com/docker/distribution/registry/client/auth/challenge # github.com/docker/docker v20.10.21+incompatible ## explicit +github.com/docker/docker/pkg/homedir github.com/docker/docker/pkg/namesgenerator +# github.com/docker/docker-credential-helpers v0.7.0 +## explicit; go 1.18 +github.com/docker/docker-credential-helpers/client +github.com/docker/docker-credential-helpers/credentials # github.com/dustin/go-humanize v1.0.1-0.20220316001817-d5090ed65664 ## explicit; go 1.16 github.com/dustin/go-humanize @@ -182,8 +200,6 @@ github.com/gogo/protobuf/sortkeys # github.com/golang-jwt/jwt v3.2.1+incompatible ## explicit github.com/golang-jwt/jwt -# github.com/golang-jwt/jwt/v4 v4.4.2 -## explicit; go 1.16 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru @@ -209,6 +225,37 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value +# github.com/google/go-containerregistry v0.13.0 +## explicit; go 1.18 +github.com/google/go-containerregistry/internal/and +github.com/google/go-containerregistry/internal/compression +github.com/google/go-containerregistry/internal/estargz +github.com/google/go-containerregistry/internal/gzip +github.com/google/go-containerregistry/internal/legacy +github.com/google/go-containerregistry/internal/redact +github.com/google/go-containerregistry/internal/retry +github.com/google/go-containerregistry/internal/retry/wait +github.com/google/go-containerregistry/internal/verify +github.com/google/go-containerregistry/internal/windows +github.com/google/go-containerregistry/internal/zstd +github.com/google/go-containerregistry/pkg/authn +github.com/google/go-containerregistry/pkg/compression +github.com/google/go-containerregistry/pkg/crane +github.com/google/go-containerregistry/pkg/legacy +github.com/google/go-containerregistry/pkg/legacy/tarball +github.com/google/go-containerregistry/pkg/logs +github.com/google/go-containerregistry/pkg/name 
+github.com/google/go-containerregistry/pkg/v1 +github.com/google/go-containerregistry/pkg/v1/empty +github.com/google/go-containerregistry/pkg/v1/layout +github.com/google/go-containerregistry/pkg/v1/match +github.com/google/go-containerregistry/pkg/v1/mutate +github.com/google/go-containerregistry/pkg/v1/partial +github.com/google/go-containerregistry/pkg/v1/remote +github.com/google/go-containerregistry/pkg/v1/remote/transport +github.com/google/go-containerregistry/pkg/v1/stream +github.com/google/go-containerregistry/pkg/v1/tarball +github.com/google/go-containerregistry/pkg/v1/types # github.com/google/go-querystring v1.1.0 ## explicit; go 1.10 github.com/google/go-querystring/query @@ -216,14 +263,36 @@ github.com/google/go-querystring/query ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource +# github.com/google/s2a-go v0.1.4 +## explicit; go 1.16 +github.com/google/s2a-go +github.com/google/s2a-go/fallback +github.com/google/s2a-go/internal/authinfo +github.com/google/s2a-go/internal/handshaker +github.com/google/s2a-go/internal/handshaker/service +github.com/google/s2a-go/internal/proto/common_go_proto +github.com/google/s2a-go/internal/proto/s2a_context_go_proto +github.com/google/s2a-go/internal/proto/s2a_go_proto +github.com/google/s2a-go/internal/proto/v2/common_go_proto +github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto +github.com/google/s2a-go/internal/proto/v2/s2a_go_proto +github.com/google/s2a-go/internal/record +github.com/google/s2a-go/internal/record/internal/aeadcrypter +github.com/google/s2a-go/internal/record/internal/halfconn +github.com/google/s2a-go/internal/tokenmanager +github.com/google/s2a-go/internal/v2 +github.com/google/s2a-go/internal/v2/certverifier +github.com/google/s2a-go/internal/v2/remotesigner +github.com/google/s2a-go/internal/v2/tlsconfigstore +github.com/google/s2a-go/stream # github.com/google/uuid v1.3.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.2.0 -## explicit; go 1.18 +# github.com/googleapis/enterprise-certificate-proxy v0.2.3 +## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util -# github.com/googleapis/gax-go/v2 v2.7.0 +# github.com/googleapis/gax-go/v2 v2.11.0 ## explicit; go 1.19 github.com/googleapis/gax-go/v2 github.com/googleapis/gax-go/v2/apierror @@ -253,6 +322,15 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go +# github.com/klauspost/compress v1.15.11 +## explicit; go 1.17 +github.com/klauspost/compress +github.com/klauspost/compress/fse +github.com/klauspost/compress/huff0 +github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/snapref +github.com/klauspost/compress/zstd +github.com/klauspost/compress/zstd/internal/xxhash # github.com/kylelemons/godebug v1.1.0 ## explicit; go 1.11 github.com/kylelemons/godebug/diff @@ -276,6 +354,9 @@ github.com/mattn/go-isatty # github.com/mitchellh/copystructure v1.2.0 ## explicit; go 1.15 github.com/mitchellh/copystructure +# github.com/mitchellh/go-homedir v1.1.0 +## explicit +github.com/mitchellh/go-homedir # github.com/mitchellh/mapstructure v1.5.0 ## explicit; go 1.14 github.com/mitchellh/mapstructure @@ -294,6 +375,13 @@ github.com/moul/gotty-client # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg +# github.com/opencontainers/go-digest v1.0.0 +## 
explicit; go 1.13 +github.com/opencontainers/go-digest +# github.com/opencontainers/image-spec v1.1.0-rc2 +## explicit; go 1.17 +github.com/opencontainers/image-spec/specs-go +github.com/opencontainers/image-spec/specs-go/v1 # github.com/packethost/packngo v0.13.0 ## explicit; go 1.15 github.com/packethost/packngo @@ -331,6 +419,9 @@ github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag +# github.com/vbatts/tar-split v0.11.2 +## explicit; go 1.15 +github.com/vbatts/tar-split/archive/tar # github.com/yudai/gojsondiff v1.0.0 ## explicit github.com/yudai/gojsondiff @@ -356,14 +447,17 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# golang.org/x/crypto v0.13.0 +# golang.org/x/crypto v0.14.0 ## explicit; go 1.17 golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 +golang.org/x/crypto/chacha20poly1305 +golang.org/x/crypto/cryptobyte +golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/curve25519 golang.org/x/crypto/curve25519/internal/field -golang.org/x/crypto/ed25519 +golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/pbkdf2 @@ -373,7 +467,7 @@ golang.org/x/crypto/scrypt golang.org/x/crypto/ssh golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/terminal -# golang.org/x/net v0.15.0 +# golang.org/x/net v0.17.0 ## explicit; go 1.17 golang.org/x/net/context golang.org/x/net/http/httpguts @@ -383,26 +477,28 @@ golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.7.0 -## explicit; go 1.17 +# golang.org/x/oauth2 v0.13.0 +## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/google golang.org/x/oauth2/google/internal/externalaccount +golang.org/x/oauth2/google/internal/externalaccountauthorizeduser +golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.1.0 -## explicit +# golang.org/x/sync v0.4.0 +## explicit; go 1.17 golang.org/x/sync/errgroup -# golang.org/x/sys v0.12.0 +# golang.org/x/sys v0.13.0 ## explicit; go 1.17 golang.org/x/sys/cpu -golang.org/x/sys/internal/unsafeheader +golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/term v0.12.0 +# golang.org/x/term v0.13.0 ## explicit; go 1.17 golang.org/x/term # golang.org/x/text v0.13.0 @@ -452,21 +548,20 @@ gomodules.xyz/wait gomodules.xyz/x/env gomodules.xyz/x/term gomodules.xyz/x/version -# google.golang.org/api v0.104.0 +# google.golang.org/api v0.126.0 ## explicit; go 1.19 google.golang.org/api/compute/v1 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport google.golang.org/api/internal +google.golang.org/api/internal/cert google.golang.org/api/internal/gensupport google.golang.org/api/internal/impersonate google.golang.org/api/internal/third_party/uritemplates google.golang.org/api/option google.golang.org/api/option/internaloption -google.golang.org/api/transport/cert google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -google.golang.org/api/transport/internal/dca # google.golang.org/appengine v1.6.7 ## explicit; go 1.11 google.golang.org/appengine @@ -477,15 +572,17 @@ google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log 
google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api +google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch +google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20221207170731-23e4bf6bdc37 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.51.0 -## explicit; go 1.17 +# google.golang.org/grpc v1.58.3 +## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -515,6 +612,7 @@ google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver @@ -534,7 +632,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.28.1 +# google.golang.org/protobuf v1.31.0 ## explicit; go 1.11 google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext @@ -836,7 +934,7 @@ k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net k8s.io/utils/pointer k8s.io/utils/strings/slices -# kmodules.xyz/client-go v0.25.38 +# kmodules.xyz/client-go v0.25.44 ## explicit; go 1.18 kmodules.xyz/client-go kmodules.xyz/client-go/api/v1 @@ -846,16 +944,19 @@ kmodules.xyz/client-go/meta # kmodules.xyz/crd-schema-fuzz v0.25.0 ## explicit; go 1.18 kmodules.xyz/crd-schema-fuzz +# kmodules.xyz/go-containerregistry v0.0.11 +## explicit; go 1.18 +kmodules.xyz/go-containerregistry/name # kmodules.xyz/offshoot-api v0.25.0 ## explicit; go 1.18 kmodules.xyz/offshoot-api/api/v1 -# kmodules.xyz/resource-metadata v0.17.26-0.20231009112556-f90c55e7954d +# kmodules.xyz/resource-metadata v0.17.44 ## explicit; go 1.18 kmodules.xyz/resource-metadata/apis/meta kmodules.xyz/resource-metadata/apis/meta/v1alpha1 kmodules.xyz/resource-metadata/apis/shared kmodules.xyz/resource-metadata/crds -# kmodules.xyz/resource-metrics v0.25.3 +# kmodules.xyz/resource-metrics v0.25.7 ## explicit; go 1.18 kmodules.xyz/resource-metrics/api # moul.io/anonuuid v1.3.2 @@ -884,7 +985,7 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# x-helm.dev/apimachinery v0.0.12 +# x-helm.dev/apimachinery v0.0.15 ## explicit; go 1.19 x-helm.dev/apimachinery/apis/charts/v1alpha1 x-helm.dev/apimachinery/apis/releases/v1alpha1 diff --git a/vendor/x-helm.dev/apimachinery/apis/releases/v1alpha1/editor_types.go b/vendor/x-helm.dev/apimachinery/apis/releases/v1alpha1/editor_types.go index 413b321a..1569922e 100644 --- a/vendor/x-helm.dev/apimachinery/apis/releases/v1alpha1/editor_types.go +++ b/vendor/x-helm.dev/apimachinery/apis/releases/v1alpha1/editor_types.go @@ -143,11 +143,13 @@ type ObjectModel struct { type ResourceObject struct { Filename string `json:"filename,omitempty"` + Key string `json:"key,omitempty"` Data *unstructured.Unstructured `json:"data,omitempty"` } type ResourceFile struct { Filename string `json:"filename,omitempty"` + Key string `json:"key,omitempty"` Data string `json:"data,omitempty"` }
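
For reference, a minimal sketch of how the new `SubtractResourceList` helper added to `kmodules.xyz/resource-metrics/api` might be exercised. The function signature is taken verbatim from the hunk above; the `api` import alias, the sample quantities, and the printed formats are assumptions, not part of this diff:

```go
package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	api "kmodules.xyz/resource-metrics/api"
)

func main() {
	// Total resources of a node (illustrative values).
	capacity := core.ResourceList{
		core.ResourceCPU:    resource.MustParse("4"),
		core.ResourceMemory: resource.MustParse("8Gi"),
	}
	// Resources already consumed.
	used := core.ResourceList{
		core.ResourceCPU:    resource.MustParse("1500m"),
		core.ResourceMemory: resource.MustParse("2Gi"),
	}

	// free = capacity - used, computed per resource name.
	free := api.SubtractResourceList(capacity, used)
	fmt.Println(free.Cpu(), free.Memory()) // expected: 2500m 6Gi
}
```

Like the existing `AddResourceList`, the new helper iterates over the union of the two lists' resource names, so a name present in only one operand still contributes to the result.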
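
Similarly, a hedged sketch of populating the extended `ResourceObject` from `x-helm.dev/apimachinery/apis/releases/v1alpha1`. The diff adds the `Key` field but does not define its semantics, so the value shown here is purely hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	releasesv1alpha1 "x-helm.dev/apimachinery/apis/releases/v1alpha1"
)

func main() {
	obj := releasesv1alpha1.ResourceObject{
		Filename: "deployment.yaml",
		Key:      "apps_v1_deployment", // hypothetical; the diff does not specify Key's format
		Data: &unstructured.Unstructured{Object: map[string]interface{}{
			"apiVersion": "apps/v1",
			"kind":       "Deployment",
		}},
	}

	out, err := json.MarshalIndent(obj, "", "  ")
	if err != nil {
		panic(err)
	}
	// Key serializes as "key" and, per its omitempty tag, is dropped when unset,
	// so existing serialized objects remain valid.
	fmt.Println(string(out))
}
```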