From 996688b25e46a094ab5c136cbf3003d98505a8bc Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Sat, 13 Apr 2024 16:01:13 -0400 Subject: [PATCH 01/18] bump deps Signed-off-by: Sam Batschelet --- go.mod | 7 +- go.sum | 6 + .../gogo/protobuf/gogoproto/Makefile | 37 + .../github.com/gogo/protobuf/gogoproto/doc.go | 169 + .../gogo/protobuf/gogoproto/gogo.pb.go | 874 + .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 + .../gogo/protobuf/gogoproto/gogo.proto | 144 + .../gogo/protobuf/gogoproto/helper.go | 415 + .../protoc-gen-gogo/descriptor/Makefile | 36 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 2865 + .../descriptor/descriptor_gostring.gen.go | 752 + .../protoc-gen-gogo/descriptor/helper.go | 390 + .../golang/protobuf/jsonpb/decode.go | 530 + .../golang/protobuf/jsonpb/encode.go | 559 + .../github.com/golang/protobuf/jsonpb/json.go | 69 + .../x/net/internal/timeseries/timeseries.go | 525 + vendor/golang.org/x/net/trace/events.go | 532 + vendor/golang.org/x/net/trace/histogram.go | 365 + vendor/golang.org/x/net/trace/trace.go | 1130 + .../genproto/googleapis/rpc/LICENSE | 202 + .../googleapis/rpc/status/status.pb.go | 203 + vendor/google.golang.org/grpc/AUTHORS | 1 + .../google.golang.org/grpc/CODE-OF-CONDUCT.md | 3 + vendor/google.golang.org/grpc/CONTRIBUTING.md | 73 + vendor/google.golang.org/grpc/GOVERNANCE.md | 1 + vendor/google.golang.org/grpc/LICENSE | 202 + vendor/google.golang.org/grpc/MAINTAINERS.md | 28 + vendor/google.golang.org/grpc/Makefile | 46 + vendor/google.golang.org/grpc/NOTICE.txt | 13 + vendor/google.golang.org/grpc/README.md | 107 + vendor/google.golang.org/grpc/SECURITY.md | 3 + .../grpc/attributes/attributes.go | 141 + vendor/google.golang.org/grpc/backoff.go | 61 + .../google.golang.org/grpc/backoff/backoff.go | 52 + .../grpc/balancer/balancer.go | 427 + .../grpc/balancer/base/balancer.go | 264 + .../grpc/balancer/base/base.go | 71 + .../grpc/balancer/conn_state_evaluator.go | 74 + .../grpc/balancer/grpclb/state/state.go | 51 + .../grpc/balancer/roundrobin/roundrobin.go | 81 + .../grpc/balancer_conn_wrappers.go | 454 + .../grpc_binarylog_v1/binarylog.pb.go | 1183 + vendor/google.golang.org/grpc/call.go | 74 + .../grpc/channelz/channelz.go | 36 + vendor/google.golang.org/grpc/clientconn.go | 2032 + vendor/google.golang.org/grpc/codec.go | 50 + vendor/google.golang.org/grpc/codegen.sh | 17 + .../grpc/codes/code_string.go | 111 + vendor/google.golang.org/grpc/codes/codes.go | 244 + .../grpc/connectivity/connectivity.go | 94 + .../grpc/credentials/credentials.go | 291 + .../grpc/credentials/insecure/insecure.go | 98 + .../google.golang.org/grpc/credentials/tls.go | 236 + vendor/google.golang.org/grpc/dialoptions.go | 715 + vendor/google.golang.org/grpc/doc.go | 26 + .../grpc/encoding/encoding.go | 135 + .../grpc/encoding/proto/proto.go | 58 + .../grpc/grpclog/component.go | 117 + .../google.golang.org/grpc/grpclog/grpclog.go | 132 + .../google.golang.org/grpc/grpclog/logger.go | 87 + .../grpc/grpclog/loggerv2.go | 258 + vendor/google.golang.org/grpc/interceptor.go | 104 + .../grpc/internal/backoff/backoff.go | 73 + .../balancer/gracefulswitch/gracefulswitch.go | 385 + .../grpc/internal/balancerload/load.go | 46 + .../grpc/internal/binarylog/binarylog.go | 192 + .../internal/binarylog/binarylog_testutil.go | 42 + .../grpc/internal/binarylog/env_config.go | 208 + .../grpc/internal/binarylog/method_logger.go | 445 + .../grpc/internal/binarylog/sink.go | 170 + .../grpc/internal/buffer/unbounded.go | 105 + 
.../grpc/internal/channelz/funcs.go | 756 + .../grpc/internal/channelz/id.go | 75 + .../grpc/internal/channelz/logging.go | 79 + .../grpc/internal/channelz/types.go | 727 + .../grpc/internal/channelz/types_linux.go | 51 + .../grpc/internal/channelz/types_nonlinux.go | 43 + .../grpc/internal/channelz/util_linux.go | 37 + .../grpc/internal/channelz/util_nonlinux.go | 27 + .../grpc/internal/credentials/credentials.go | 49 + .../grpc/internal/credentials/spiffe.go | 75 + .../grpc/internal/credentials/syscallconn.go | 58 + .../grpc/internal/credentials/util.go | 52 + .../grpc/internal/envconfig/envconfig.go | 72 + .../grpc/internal/envconfig/observability.go | 42 + .../grpc/internal/envconfig/xds.go | 95 + .../grpc/internal/grpclog/grpclog.go | 126 + .../grpc/internal/grpclog/prefixLogger.go | 93 + .../grpc/internal/grpcrand/grpcrand.go | 95 + .../internal/grpcsync/callback_serializer.go | 125 + .../grpc/internal/grpcsync/event.go | 61 + .../grpc/internal/grpcsync/oncefunc.go | 32 + .../grpc/internal/grpcsync/pubsub.go | 121 + .../grpc/internal/grpcutil/compressor.go | 47 + .../grpc/internal/grpcutil/encode_duration.go | 63 + .../grpc/internal/grpcutil/grpcutil.go | 20 + .../grpc/internal/grpcutil/metadata.go | 40 + .../grpc/internal/grpcutil/method.go | 88 + .../grpc/internal/grpcutil/regex.go | 31 + .../grpc/internal/idle/idle.go | 301 + .../grpc/internal/internal.go | 205 + .../grpc/internal/metadata/metadata.go | 132 + .../grpc/internal/pretty/pretty.go | 82 + .../grpc/internal/resolver/config_selector.go | 167 + .../internal/resolver/dns/dns_resolver.go | 470 + .../resolver/passthrough/passthrough.go | 64 + .../grpc/internal/resolver/unix/unix.go | 74 + .../grpc/internal/serviceconfig/duration.go | 130 + .../internal/serviceconfig/serviceconfig.go | 180 + .../grpc/internal/status/status.go | 176 + .../grpc/internal/syscall/syscall_linux.go | 112 + .../grpc/internal/syscall/syscall_nonlinux.go | 77 + .../grpc/internal/transport/bdp_estimator.go | 141 + .../grpc/internal/transport/controlbuf.go | 1007 + .../grpc/internal/transport/defaults.go | 55 + .../grpc/internal/transport/flowcontrol.go | 215 + .../grpc/internal/transport/handler_server.go | 480 + .../grpc/internal/transport/http2_client.go | 1797 + .../grpc/internal/transport/http2_server.go | 1464 + .../grpc/internal/transport/http_util.go | 479 + .../grpc/internal/transport/logging.go | 40 + .../transport/networktype/networktype.go | 46 + .../grpc/internal/transport/proxy.go | 142 + .../grpc/internal/transport/transport.go | 837 + .../grpc/internal/xds_handshake_cluster.go | 40 + .../grpc/keepalive/keepalive.go | 85 + .../grpc/metadata/metadata.go | 295 + vendor/google.golang.org/grpc/peer/peer.go | 51 + .../google.golang.org/grpc/picker_wrapper.go | 236 + vendor/google.golang.org/grpc/pickfirst.go | 263 + vendor/google.golang.org/grpc/preloader.go | 67 + vendor/google.golang.org/grpc/regenerate.sh | 123 + vendor/google.golang.org/grpc/resolver/map.go | 138 + .../grpc/resolver/resolver.go | 316 + .../grpc/resolver_conn_wrapper.go | 247 + vendor/google.golang.org/grpc/rpc_util.go | 959 + vendor/google.golang.org/grpc/server.go | 2122 + .../google.golang.org/grpc/service_config.go | 347 + .../grpc/serviceconfig/serviceconfig.go | 44 + .../grpc/shared_buffer_pool.go | 154 + .../google.golang.org/grpc/stats/handlers.go | 63 + vendor/google.golang.org/grpc/stats/stats.go | 343 + .../google.golang.org/grpc/status/status.go | 162 + vendor/google.golang.org/grpc/stream.go | 1780 + vendor/google.golang.org/grpc/tap/tap.go | 56 + 
vendor/google.golang.org/grpc/trace.go | 123 + vendor/google.golang.org/grpc/version.go | 22 + vendor/google.golang.org/grpc/vet.sh | 209 + .../protobuf/encoding/protojson/decode.go | 665 + .../protobuf/encoding/protojson/doc.go | 11 + .../protobuf/encoding/protojson/encode.go | 349 + .../encoding/protojson/well_known_types.go | 895 + .../protobuf/internal/encoding/json/decode.go | 340 + .../internal/encoding/json/decode_number.go | 254 + .../internal/encoding/json/decode_string.go | 91 + .../internal/encoding/json/decode_token.go | 192 + .../protobuf/internal/encoding/json/encode.go | 278 + .../k8s.io/code-generator/generate-groups.sh | 0 .../generate-internal-groups.sh | 0 vendor/k8s.io/cri-api/LICENSE | 201 + .../cri-api/pkg/apis/runtime/v1/api.pb.go | 47530 ++++++++++++++++ .../cri-api/pkg/apis/runtime/v1/api.proto | 1863 + .../cri-api/pkg/apis/runtime/v1/constants.go | 55 + .../pkg/apis/testing/fake_image_service.go | 256 + .../pkg/apis/testing/fake_runtime_service.go | 796 + .../k8s.io/cri-api/pkg/apis/testing/utils.go | 48 + vendor/modules.txt | 65 + 168 files changed, 94075 insertions(+), 2 deletions(-) create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/Makefile create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/doc.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/gogo.proto create mode 100644 vendor/github.com/gogo/protobuf/gogoproto/helper.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/decode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/encode.go create mode 100644 vendor/github.com/golang/protobuf/jsonpb/json.go create mode 100644 vendor/golang.org/x/net/internal/timeseries/timeseries.go create mode 100644 vendor/golang.org/x/net/trace/events.go create mode 100644 vendor/golang.org/x/net/trace/histogram.go create mode 100644 vendor/golang.org/x/net/trace/trace.go create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/LICENSE create mode 100644 vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go create mode 100644 vendor/google.golang.org/grpc/AUTHORS create mode 100644 vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md create mode 100644 vendor/google.golang.org/grpc/CONTRIBUTING.md create mode 100644 vendor/google.golang.org/grpc/GOVERNANCE.md create mode 100644 vendor/google.golang.org/grpc/LICENSE create mode 100644 vendor/google.golang.org/grpc/MAINTAINERS.md create mode 100644 vendor/google.golang.org/grpc/Makefile create mode 100644 vendor/google.golang.org/grpc/NOTICE.txt create mode 100644 vendor/google.golang.org/grpc/README.md create mode 100644 vendor/google.golang.org/grpc/SECURITY.md create mode 100644 vendor/google.golang.org/grpc/attributes/attributes.go create mode 100644 vendor/google.golang.org/grpc/backoff.go create mode 100644 vendor/google.golang.org/grpc/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/balancer/balancer.go create mode 
100644 vendor/google.golang.org/grpc/balancer/base/balancer.go create mode 100644 vendor/google.golang.org/grpc/balancer/base/base.go create mode 100644 vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go create mode 100644 vendor/google.golang.org/grpc/balancer/grpclb/state/state.go create mode 100644 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go create mode 100644 vendor/google.golang.org/grpc/balancer_conn_wrappers.go create mode 100644 vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go create mode 100644 vendor/google.golang.org/grpc/call.go create mode 100644 vendor/google.golang.org/grpc/channelz/channelz.go create mode 100644 vendor/google.golang.org/grpc/clientconn.go create mode 100644 vendor/google.golang.org/grpc/codec.go create mode 100644 vendor/google.golang.org/grpc/codegen.sh create mode 100644 vendor/google.golang.org/grpc/codes/code_string.go create mode 100644 vendor/google.golang.org/grpc/codes/codes.go create mode 100644 vendor/google.golang.org/grpc/connectivity/connectivity.go create mode 100644 vendor/google.golang.org/grpc/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/credentials/insecure/insecure.go create mode 100644 vendor/google.golang.org/grpc/credentials/tls.go create mode 100644 vendor/google.golang.org/grpc/dialoptions.go create mode 100644 vendor/google.golang.org/grpc/doc.go create mode 100644 vendor/google.golang.org/grpc/encoding/encoding.go create mode 100644 vendor/google.golang.org/grpc/encoding/proto/proto.go create mode 100644 vendor/google.golang.org/grpc/grpclog/component.go create mode 100644 vendor/google.golang.org/grpc/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/grpclog/logger.go create mode 100644 vendor/google.golang.org/grpc/grpclog/loggerv2.go create mode 100644 vendor/google.golang.org/grpc/interceptor.go create mode 100644 vendor/google.golang.org/grpc/internal/backoff/backoff.go create mode 100644 vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go create mode 100644 vendor/google.golang.org/grpc/internal/balancerload/load.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/env_config.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/method_logger.go create mode 100644 vendor/google.golang.org/grpc/internal/binarylog/sink.go create mode 100644 vendor/google.golang.org/grpc/internal/buffer/unbounded.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/funcs.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/id.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/credentials.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/spiffe.go create mode 100644 vendor/google.golang.org/grpc/internal/credentials/syscallconn.go create mode 100644 
vendor/google.golang.org/grpc/internal/credentials/util.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/envconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/observability.go create mode 100644 vendor/google.golang.org/grpc/internal/envconfig/xds.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/event.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/compressor.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/method.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/regex.go create mode 100644 vendor/google.golang.org/grpc/internal/idle/idle.go create mode 100644 vendor/google.golang.org/grpc/internal/internal.go create mode 100644 vendor/google.golang.org/grpc/internal/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/internal/pretty/pretty.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/config_selector.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go create mode 100644 vendor/google.golang.org/grpc/internal/resolver/unix/unix.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/duration.go create mode 100644 vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/internal/status/status.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go create mode 100644 vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/controlbuf.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/defaults.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/flowcontrol.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/handler_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_client.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http2_server.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/http_util.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/proxy.go create mode 100644 vendor/google.golang.org/grpc/internal/transport/transport.go create mode 100644 vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go create mode 100644 vendor/google.golang.org/grpc/keepalive/keepalive.go 
create mode 100644 vendor/google.golang.org/grpc/metadata/metadata.go create mode 100644 vendor/google.golang.org/grpc/peer/peer.go create mode 100644 vendor/google.golang.org/grpc/picker_wrapper.go create mode 100644 vendor/google.golang.org/grpc/pickfirst.go create mode 100644 vendor/google.golang.org/grpc/preloader.go create mode 100644 vendor/google.golang.org/grpc/regenerate.sh create mode 100644 vendor/google.golang.org/grpc/resolver/map.go create mode 100644 vendor/google.golang.org/grpc/resolver/resolver.go create mode 100644 vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 vendor/google.golang.org/grpc/rpc_util.go create mode 100644 vendor/google.golang.org/grpc/server.go create mode 100644 vendor/google.golang.org/grpc/service_config.go create mode 100644 vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go create mode 100644 vendor/google.golang.org/grpc/shared_buffer_pool.go create mode 100644 vendor/google.golang.org/grpc/stats/handlers.go create mode 100644 vendor/google.golang.org/grpc/stats/stats.go create mode 100644 vendor/google.golang.org/grpc/status/status.go create mode 100644 vendor/google.golang.org/grpc/stream.go create mode 100644 vendor/google.golang.org/grpc/tap/tap.go create mode 100644 vendor/google.golang.org/grpc/trace.go create mode 100644 vendor/google.golang.org/grpc/version.go create mode 100644 vendor/google.golang.org/grpc/vet.sh create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/json/encode.go mode change 100755 => 100644 vendor/k8s.io/code-generator/generate-groups.sh mode change 100755 => 100644 vendor/k8s.io/code-generator/generate-internal-groups.sh create mode 100644 vendor/k8s.io/cri-api/LICENSE create mode 100644 vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go create mode 100644 vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto create mode 100644 vendor/k8s.io/cri-api/pkg/apis/runtime/v1/constants.go create mode 100644 vendor/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go create mode 100644 vendor/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go create mode 100644 vendor/k8s.io/cri-api/pkg/apis/testing/utils.go diff --git a/go.mod b/go.mod index 4db13ed48b..fbda09ef66 100644 --- a/go.mod +++ b/go.mod @@ -39,12 +39,14 @@ require ( github.com/vincent-petithory/dataurl v1.0.0 golang.org/x/net v0.19.0 golang.org/x/time v0.3.0 + google.golang.org/grpc v1.58.3 k8s.io/api v0.29.2 k8s.io/apiextensions-apiserver v0.29.2 k8s.io/apimachinery v0.29.2 k8s.io/client-go v0.29.2 k8s.io/code-generator v0.29.2 k8s.io/component-base v0.29.2 + k8s.io/cri-api v0.29.2 k8s.io/kubectl v0.29.2 k8s.io/kubelet v0.29.2 k8s.io/utils v0.0.0-20230726121419-3b25d923346b @@ -106,6 +108,7 @@ require ( go.mongodb.org/mongo-driver v1.11.3 // indirect go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect 
go.tmz.dev/musttag v0.7.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 // indirect gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect k8s.io/cli-runtime v0.29.2 // indirect @@ -176,7 +179,7 @@ require ( github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da github.com/golang/protobuf v1.5.3 // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect @@ -242,7 +245,7 @@ require ( github.com/nishanths/exhaustive v0.11.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/opencontainers/image-spec v1.1.0-rc5 // indirect + github.com/opencontainers/image-spec v1.1.0-rc5 github.com/opencontainers/runc v1.1.10 // indirect github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect diff --git a/go.sum b/go.sum index 35e55944fb..afa6ab27b2 100644 --- a/go.sum +++ b/go.sum @@ -1338,6 +1338,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1354,6 +1356,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1430,6 +1434,8 @@ k8s.io/code-generator v0.29.2 h1:c9/iw2KnNpw2IRV+wwuG/Wns2TjPSgjWzbbjTevyiHI= k8s.io/code-generator v0.29.2/go.mod h1:FwFi3C9jCrmbPjekhaCYcYG1n07CYiW1+PAPCockaos= k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= +k8s.io/cri-api v0.29.2 
h1:LLSeWVC3h1nVMpV9vHiE+mO3spDYmz/C0GvxH6p6tkg= +k8s.io/cri-api v0.29.2/go.mod h1:9fQTFm+wi4FLyqrkVUoMJiUB3mE74XrVvHz8uFY/sSw= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 0000000000..0b4659b731 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 0000000000..081c86fa8e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. 
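Before walking through that example, here is a minimal sketch of what the nullable warning above means in practice (an editorial illustration using a hypothetical single-field message, not taken from the vendored files): with nullable=false the generated field is a value instead of a pointer, so "unset" and "set to the zero value" become indistinguishable.

    // Hypothetical generated structs for: optional string Name = 1;
    type WithPointer struct {
        Name *string // default (nullable=true): nil means the field was never set
    }
    type WithoutPointer struct {
        Name string // nullable=false: "" may mean unset or explicitly empty
    }

After a marshal/unmarshal round trip, WithoutPointer cannot tell whether Name was ever set, which is exactly the set/unset guarantee the warning says is lost.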
+ +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +This is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available. + +Gogoprotobuf also has some more subtle changes; these could be changed back: + + - the generated package names for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
+ - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful if you would rather use enum_stringer, or write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful if you would rather use stringer, or write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). + +Less Typing and Peace of Mind are explained in their specific plugin folders' godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extensions, the code that is generated +will be the same as if goprotobuf had generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a separate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 0000000000..1e91766aee --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
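// Editorial sketch, not part of the generated file: the ExtensionDesc
// values declared below are what callers pass to the gogo runtime to read
// the options documented in doc.go, e.g. (assuming some opts value of type
// *descriptor.FieldOptions):
//
//	if v, err := proto.GetExtension(opts, E_Nullable); err == nil {
//		nullable := *(v.(*bool)) // false: the field is generated without a pointer
//		_ = nullable
//	}
//
// The helper.go vendored alongside this file wraps this lookup pattern in
// convenience helpers.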
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + 
ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: 
"gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + 
ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + 
ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + 
proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 
0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 
0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 0000000000..f6502e4b90 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 0000000000..b80c85653f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
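The `E_*` descriptors above are ordinary `*proto.ExtensionDesc` values, so the gogo/protobuf v1 extension API round-trips them directly. A minimal sketch of that round trip, assuming the v1 `proto.SetExtension`/`proto.GetExtension` API; the `"IPAddr"` custom type name is made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Attach (gogoproto.customtype) to a field's options; "IPAddr" is a
	// made-up value used only for illustration.
	opts := &descriptor.FieldOptions{}
	if err := proto.SetExtension(opts, gogoproto.E_Customtype, proto.String("IPAddr")); err != nil {
		panic(err)
	}

	// Read the option back off the descriptor; this is the same lookup the
	// gogoproto helper functions added later in this patch perform internally.
	v, err := proto.GetExtension(opts, gogoproto.E_Customtype)
	if err == nil {
		fmt.Println(*(v.(*string))) // IPAddr
	}
}
```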
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 0000000000..390d4e4be6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == 
".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && 
v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, 
proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return 
proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 0000000000..3496dc99d5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 0000000000..a85bf1984c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. 
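The package doc above is the whole story: `ForMessage` (defined just below) gunzips the descriptor bytes embedded in every generated type and walks the index path down to the message definition. A hedged usage sketch; `pb.MyMessage` and its import path are placeholders for any protoc-gen-gogo generated type:

```go
package main

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"

	pb "example.com/hypothetical/pb" // placeholder: any protoc-gen-gogo output
)

func main() {
	// ForMessage calls msg.Descriptor(), gunzips the embedded
	// FileDescriptorProto, and follows the index path to the message.
	fd, md := descriptor.ForMessage(&pb.MyMessage{})
	fmt.Printf("file=%s message=%s\n", fd.GetName(), md.GetName())
}
```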
+package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 0000000000..18b2a3318a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. 
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label 
{ + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
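One common way to obtain such a set is `protoc --include_imports --descriptor_set_out=...`; the resulting bytes unmarshal straight into the `FileDescriptorSet` type defined next. A sketch, with `set.pb` and `my.proto` as placeholder file names:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Generated beforehand with (placeholder file names):
	//   protoc --include_imports --descriptor_set_out=set.pb my.proto
	b, err := ioutil.ReadFile("set.pb")
	if err != nil {
		panic(err)
	}

	// A FileDescriptorSet is itself a proto message, so it unmarshals
	// with the ordinary proto API.
	var set descriptor.FileDescriptorSet
	if err := proto.Unmarshal(b, &set); err != nil {
		panic(err)
	}
	for _, fd := range set.GetFile() {
		fmt.Println(fd.GetName(), "package:", fd.GetPackage())
	}
}
```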
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
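Because `DescriptorProto` (next) nests further message types under `NestedType`, enumerating a file's messages is a short recursion over the getters just defined. A sketch under the same assumption as before, with `pb.MyMessage` a placeholder generated type:

```go
package main

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"

	pb "example.com/hypothetical/pb" // placeholder: any protoc-gen-gogo output
)

// walk prints every message name in a file, descending into nested types.
func walk(prefix string, msgs []*descriptor.DescriptorProto) {
	for _, m := range msgs {
		fmt.Println(prefix + m.GetName())
		walk(prefix+m.GetName()+".", m.GetNestedType())
	}
}

func main() {
	fd, _ := descriptor.ForMessage(&pb.MyMessage{})
	walk(fd.GetPackage()+".", fd.GetMessageType())
}
```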
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m 
!= nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
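+//
+// Editor's sketch (not part of the generated code): End is exclusive here
+// (unlike EnumDescriptorProto's EnumReservedRange below, which is inclusive),
+// so a hypothetical check for a reserved tag reads:
+//
+//	func tagReserved(msg *DescriptorProto, tag int32) bool {
+//		for _, r := range msg.GetReservedRange() {
+//			if tag >= r.GetStart() && tag < r.GetEnd() {
+//				return true
+//			}
+//		}
+//		return false
+//	}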
+type DescriptorProto_ReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset()         { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage()    {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{2, 1}
+}
+func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
+}
+func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+type ExtensionRangeOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
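+//
+// Editor's sketch (not part of the generated code): a field's membership in a
+// oneof is recorded as an index into DescriptorProto.OneofDecl, so the
+// declaring oneof can be resolved as follows:
+//
+//	func containingOneof(msg *DescriptorProto, f *FieldDescriptorProto) *OneofDescriptorProto {
+//		if f.OneofIndex == nil {
+//			return nil // the field is not a member of any oneof
+//		}
+//		return msg.GetOneofDecl()[f.GetOneofIndex()]
+//	}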
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
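+//
+// Editor's sketch (not part of the generated code): because End is inclusive
+// here, a membership test must use <= rather than the < that suits
+// DescriptorProto_ReservedRange:
+//
+//	func numberReserved(e *EnumDescriptorProto, n int32) bool {
+//		for _, r := range e.GetReservedRange() {
+//			if n >= r.GetStart() && n <= r.GetEnd() {
+//				return true
+//			}
+//		}
+//		return false
+//	}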
+type EnumDescriptorProto_EnumReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) Reset() {
+	*m = EnumDescriptorProto_EnumReservedRange{}
+}
+func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage()    {}
+func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{6, 0}
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+// Describes a value within an enum.
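+//
+// Editor's sketch (not part of the generated code): assuming a variable enum
+// of type *EnumDescriptorProto, its name/number pairs can be listed with the
+// getters defined above:
+//
+//	for _, v := range enum.GetValue() {
+//		fmt.Printf("%s = %d\n", v.GetName(), v.GetNumber())
+//	}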
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
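+//
+// Editor's sketch (not part of the generated code): assuming a variable svc
+// of type *ServiceDescriptorProto, its methods and their streaming shape can
+// be enumerated via the getters defined below:
+//
+//	for _, m := range svc.GetMethod() {
+//		fmt.Printf("%s(%s) returns (%s) client-streaming=%v server-streaming=%v\n",
+//			m.GetName(), m.GetInputType(), m.GetOutputType(),
+//			m.GetClientStreaming(), m.GetServerStreaming())
+//	}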
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1
+	// (equivalent to the old "--one_java_file" option) and Proto2 (where
+	// a .proto always translates to a single class, but you may want to
+	// explicitly choose the class name).
+	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+	// If set true, then the Java code generator will generate a separate .java
+	// file for each top-level message, enum, and service defined in the .proto
+	// file. Thus, these types will *not* be nested inside the outer class
+	// named by java_outer_classname. However, the outer class will still be
+	// generated to contain the file's getDescriptor() method as well as any
+	// top-level extensions defined in the file.
+	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+	// This option does nothing.
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
+	// If set true, then the Java2 code generator will generate code that
+	// throws an exception whenever an attempt is made to assign a non-UTF-8
+	// byte sequence to a string field.
+	// Message reflection will do the same.
+	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
+	JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+	OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+	// Sets the Go package where structs generated from this .proto will be
+	// placed. If omitted, the Go package will be derived from the following:
+	//   - The basename of the package import path, if provided.
+	//   - Otherwise, the package statement in the .proto file, if present.
+	//   - Otherwise, the basename of the .proto file, without extension.
+	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+	// Should generic services be generated in each language? "Generic" services
+	// are not specific to any particular RPC system. They are generated by the
+	// main code generators in each language (without additional plugins).
+	// Generic services were the only kind of service generation supported by
+	// early versions of google.protobuf.
+	//
+	// Generic services are now considered deprecated in favor of using plugins
+	// that generate code specific to your particular RPC system. Therefore,
+	// these default to false. Old code which depends on generic services should
+	// explicitly set them to true.
+	CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+	PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+	PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+	// Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be + // used for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
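+	//
+	// Editor's sketch (not part of the generated code): custom file options
+	// occupy the extension range beginning at field number 1000 and are
+	// normally read through the proto package's extension API, e.g.:
+	//
+	//	ext, err := proto.GetExtension(fileOpts, somepkg.E_MyFileOption)
+	//
+	// where fileOpts is a *FileOptions and somepkg.E_MyFileOption is a
+	// hypothetical generated extension descriptor.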
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicit setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64-bit integral and fixed types
+	// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+	// is represented as a JavaScript string, which avoids loss of precision that
+	// can happen when a large value is converted to a floating point JavaScript
+	// number. Specifying JS_NUMBER for the jstype causes the generated JavaScript
+	// code to use the JavaScript "number" type. The behavior of the default
+	// option JS_NORMAL is implementation dependent.
+	//
+	// This option is an enum to permit additional types to be added, e.g.
+	// goog.math.Integer.
+	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+	// Should this field be parsed lazily? Lazy applies only to message-type
+	// fields. It means that when the outer message is initially parsed, the
+	// inner message's contents will not be parsed but instead stored in encoded
+	// form. The inner message will actually be parsed when it is first accessed.
+	//
+	// This is only a hint. Implementations are free to choose whether to use
+	// eager or lazy parsing regardless of the value of this option. However,
+	// setting this option true suggests that the protocol author believes that
+	// using lazy parsing on this field is worth the additional bookkeeping
+	// overhead typically needed to implement it.
+	//
+	// This option does not affect the public interface of any generated code;
+	// all method signatures remain the same. Furthermore, thread-safety of the
+	// interface is not affected by this option; const methods remain safe to
+	// call from multiple threads concurrently, while non-const methods continue
+	// to require exclusive access.
+	//
+	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message. That is, calling IsInitialized() on the outer message
+	// may return true even if the inner message has missing required fields.
+	// This is necessary because otherwise the inner message would have to be
+	// parsed in order to perform the check, defeating the purpose of lazy
+	// parsing. An implementation which chooses not to check required fields
+	// must be consistent about it. That is, for any particular sub-message, the
+	// implementation must either *always* check its required fields, or *never*
+	// check its required fields, regardless of whether or not the message has
+	// been parsed.
+	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+	// Is this field deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating fields.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// For Google-internal migration only. Do not use.
+	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
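+ // For example (an illustrative reading, not generated text), a span of
+ // [ 3, 4, 3, 21 ] runs on line 3 from column 4 to column 21, and the
+ // equivalent three-element form [ 3, 4, 21 ] omits the end line because
+ // it matches the start line; since both are zero-based, a user-facing
+ // report would show line 4, column 5.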
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
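+ // For example (an illustrative reading), begin=10 and end=14 identify
+ // the four bytes at offsets 10 through 13, a half-open range analogous
+ // to the Go slice expression text[10:14].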
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 0000000000..165b2110df --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != 
nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 0000000000..e0846a357d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
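+
+// The helpers below compute protobuf wire keys for descriptor fields. As a
+// quick sketch (hypothetical values, not from any real descriptor): a field
+// with number 3 and type TYPE_STRING has wire type 2, so its key is
+//
+//	key := uint64(3<<3 | 2) // 0x1a, what GetKeyUint64 yields for that field
+//	// GetKey varint-encodes the key, here to the single byte 0x1a
+//
+// which is the same field tag every protobuf encoder writes on the wire.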
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/decode.go 
b/vendor/github.com/golang/protobuf/jsonpb/decode.go new file mode 100644 index 0000000000..6c16c255ff --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/decode.go @@ -0,0 +1,530 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONUnmarshalV2 = false + +// UnmarshalNext unmarshals the next JSON object from d into m. +func UnmarshalNext(d *json.Decoder, m proto.Message) error { + return new(Unmarshaler).UnmarshalNext(d, m) +} + +// Unmarshal unmarshals a JSON object from r into m. +func Unmarshal(r io.Reader, m proto.Message) error { + return new(Unmarshaler).Unmarshal(r, m) +} + +// UnmarshalString unmarshals a JSON object from s into m. +func UnmarshalString(s string, m proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(s), m) +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // AllowUnknownFields specifies whether to allow messages to contain + // unknown JSON fields, as opposed to failing to unmarshal. + AllowUnknownFields bool + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize the way +// they are unmarshaled from JSON. Messages that implement this should also +// implement JSONPBMarshaler so that the custom format can be produced. +// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Unmarshal unmarshals a JSON object from r into m. +func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error { + return u.UnmarshalNext(json.NewDecoder(r), m) +} + +// UnmarshalNext unmarshals the next JSON object from d into m. +func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error { + if m == nil { + return errors.New("invalid nil message") + } + + // Parse the next JSON object from the stream. + raw := json.RawMessage{} + if err := d.Decode(&raw); err != nil { + return err + } + + // Check for custom unmarshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsu, ok := m.(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, raw) + } + + mr := proto.MessageReflect(m) + + // NOTE: For historical reasons, a top-level null is treated as a noop. + // This is incorrect, but kept for compatibility. + if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" { + return nil + } + + if wrapJSONUnmarshalV2 { + // NOTE: If input message is non-empty, we need to preserve merge semantics + // of the old jsonpb implementation. These semantics are not supported by + // the protobuf JSON specification. 
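+ // (The Range callback below returns false on the first visited field,
+ // so this is an O(1) emptiness probe rather than a full scan.)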
+ isEmpty := true + mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool { + isEmpty = false // at least one iteration implies non-empty + return false + }) + if !isEmpty { + // Perform unmarshaling into a newly allocated, empty message. + mr = mr.New() + + // Use a defer to copy all unmarshaled fields into the original message. + dst := proto.MessageReflect(m) + defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + dst.Set(fd, v) + return true + }) + } + + // Unmarshal using the v2 JSON unmarshaler. + opts := protojson.UnmarshalOptions{ + DiscardUnknown: u.AllowUnknownFields, + } + if u.AnyResolver != nil { + opts.Resolver = anyResolver{u.AnyResolver} + } + return opts.Unmarshal(raw, mr.Interface()) + } else { + if err := u.unmarshalMessage(mr, raw); err != nil { + return err + } + return protoV2.CheckInitialized(mr.Interface()) + } +} + +func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error { + md := m.Descriptor() + fds := md.Fields() + + if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, in) + } + + if string(in) == "null" && md.FullName() != "google.protobuf.Value" { + return nil + } + + switch wellKnownType(md.FullName()) { + case "Any": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + rawTypeURL, ok := jsonObject["@type"] + if !ok { + return errors.New("Any JSON doesn't have '@type'") + } + typeURL, err := unquoteString(string(rawTypeURL)) + if err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL) + } + m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL)) + + var m2 protoreflect.Message + if u.AnyResolver != nil { + mi, err := u.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + if err == protoregistry.NotFound { + return fmt.Errorf("could not resolve Any message type: %v", typeURL) + } + return err + } + m2 = mt.New() + } + + if wellKnownType(m2.Descriptor().FullName()) != "" { + rawValue, ok := jsonObject["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + if err := u.unmarshalMessage(m2, rawValue); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } else { + delete(jsonObject, "@type") + rawJSON, err := json.Marshal(jsonObject) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + if err = u.unmarshalMessage(m2, rawJSON); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err) + } + } + + rawWire, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire)) + return nil + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + v, err := u.unmarshalValue(m.NewField(fd), in, fd) + if err != nil { + return err + } + m.Set(fd, v) + return nil + case "Duration": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + d, err := time.ParseDuration(v) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + sec := d.Nanoseconds() / 1e9 + nsec := d.Nanoseconds() % 1e9 + 
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Timestamp": + v, err := unquoteString(string(in)) + if err != nil { + return err + } + t, err := time.Parse(time.RFC3339Nano, v) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + sec := t.Unix() + nsec := t.Nanosecond() + m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec))) + m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec))) + return nil + case "Value": + switch { + case string(in) == "null": + m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0)) + case string(in) == "true": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true)) + case string(in) == "false": + m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false)) + case hasPrefixAndSuffix('"', in, '"'): + s, err := unquoteString(string(in)) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s)) + case hasPrefixAndSuffix('[', in, ']'): + v := m.Mutable(fds.ByNumber(6)) + return u.unmarshalMessage(v.Message(), in) + case hasPrefixAndSuffix('{', in, '}'): + v := m.Mutable(fds.ByNumber(5)) + return u.unmarshalMessage(v.Message(), in) + default: + f, err := strconv.ParseFloat(string(in), 0) + if err != nil { + return fmt.Errorf("unrecognized type for Value %q", in) + } + m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f)) + } + return nil + case "ListValue": + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + lv := m.Mutable(fds.ByNumber(1)).List() + for _, raw := range jsonArray { + ve := lv.NewElement() + if err := u.unmarshalMessage(ve.Message(), raw); err != nil { + return err + } + lv.Append(ve) + } + return nil + case "Struct": + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + mv := m.Mutable(fds.ByNumber(1)).Map() + for key, raw := range jsonObject { + kv := protoreflect.ValueOf(key).MapKey() + vv := mv.NewValue() + if err := u.unmarshalMessage(vv.Message(), raw); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", key, err) + } + mv.Set(kv, vv) + } + return nil + } + + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return err + } + + // Handle known fields. + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if fd.IsWeak() && fd.Message().IsPlaceholder() { + continue // weak reference is not linked in + } + + // Search for any raw JSON value associated with this field. + var raw json.RawMessage + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + name = string(fd.JSONName()) + if v, ok := jsonObject[name]; ok { + delete(jsonObject, name) + raw = v + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + // Handle extension fields. + for name, raw := range jsonObject { + if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") { + continue + } + + // Resolve the extension field by name. 
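+ // (Extension keys arrive bracketed, e.g. a hypothetical
+ // {"[my.pkg.my_ext]": ...}; the slicing below strips the brackets to
+ // recover the protoreflect.FullName.)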
+ xname := protoreflect.FullName(name[len("[") : len(name)-len("]")]) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(md) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + continue + } + delete(jsonObject, name) + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName()) + } + + field := m.NewField(fd) + // Unmarshal the field value. + if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) { + continue + } + v, err := u.unmarshalValue(field, raw, fd) + if err != nil { + return err + } + m.Set(fd, v) + } + + if !u.AllowUnknownFields && len(jsonObject) > 0 { + for name := range jsonObject { + return fmt.Errorf("unknown field %q in %v", name, md.FullName()) + } + } + return nil +} + +func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool { + if fd.Cardinality() == protoreflect.Repeated { + return false + } + if md := fd.Message(); md != nil { + return md.FullName() == "google.protobuf.Value" + } + if ed := fd.Enum(); ed != nil { + return ed.FullName() == "google.protobuf.NullValue" + } + return false +} + +func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool { + if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated { + _, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler) + return ok + } + return false +} + +func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch { + case fd.IsList(): + var jsonArray []json.RawMessage + if err := json.Unmarshal(in, &jsonArray); err != nil { + return v, err + } + lv := v.List() + for _, raw := range jsonArray { + ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd) + if err != nil { + return v, err + } + lv.Append(ve) + } + return v, nil + case fd.IsMap(): + var jsonObject map[string]json.RawMessage + if err := json.Unmarshal(in, &jsonObject); err != nil { + return v, err + } + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + for key, raw := range jsonObject { + var kv protoreflect.MapKey + if kfd.Kind() == protoreflect.StringKind { + kv = protoreflect.ValueOf(key).MapKey() + } else { + v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd) + if err != nil { + return v, err + } + kv = v.MapKey() + } + + vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd) + if err != nil { + return v, err + } + mv.Set(kv, vv) + } + return v, nil + default: + return u.unmarshalSingularValue(v, in, fd) + } +} + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(+1), + `"-Infinity"`: math.Inf(-1), +} + +func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + return unmarshalValue(in, new(bool)) + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + return unmarshalValue(trimQuote(in), new(int32)) + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return unmarshalValue(trimQuote(in), new(int64)) + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + return unmarshalValue(trimQuote(in), new(uint32)) + case protoreflect.Uint64Kind, 
protoreflect.Fixed64Kind: + return unmarshalValue(trimQuote(in), new(uint64)) + case protoreflect.FloatKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat32(float32(f)), nil + } + return unmarshalValue(trimQuote(in), new(float32)) + case protoreflect.DoubleKind: + if f, ok := nonFinite[string(in)]; ok { + return protoreflect.ValueOfFloat64(float64(f)), nil + } + return unmarshalValue(trimQuote(in), new(float64)) + case protoreflect.StringKind: + return unmarshalValue(in, new(string)) + case protoreflect.BytesKind: + return unmarshalValue(in, new([]byte)) + case protoreflect.EnumKind: + if hasPrefixAndSuffix('"', in, '"') { + vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in))) + if vd == nil { + return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName()) + } + return protoreflect.ValueOfEnum(vd.Number()), nil + } + return unmarshalValue(in, new(protoreflect.EnumNumber)) + case protoreflect.MessageKind, protoreflect.GroupKind: + err := u.unmarshalMessage(v.Message(), in) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } +} + +func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) { + err := json.Unmarshal(in, v) + return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err +} + +func unquoteString(in string) (out string, err error) { + err = json.Unmarshal([]byte(in), &out) + return out, err +} + +func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool { + if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix { + return true + } + return false +} + +// trimQuote is like unquoteString but simply strips surrounding quotes. +// This is incorrect, but is behavior done by the legacy implementation. +func trimQuote(in []byte) []byte { + if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' { + in = in[1 : len(in)-1] + } + return in +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/encode.go b/vendor/github.com/golang/protobuf/jsonpb/encode.go new file mode 100644 index 0000000000..685c80a62b --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/encode.go @@ -0,0 +1,559 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package jsonpb + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapJSONMarshalV2 = false + +// Marshaler is a configurable object for marshaling protocol buffer messages +// to the specified JSON representation. +type Marshaler struct { + // OrigName specifies whether to use the original protobuf name for fields. + OrigName bool + + // EnumsAsInts specifies whether to render enum values as integers, + // as opposed to string values. + EnumsAsInts bool + + // EmitDefaults specifies whether to render fields with zero values. + EmitDefaults bool + + // Indent controls whether the output is compact or not. + // If empty, the output is compact JSON. Otherwise, every JSON object + // entry and JSON array value will be on its own line. + // Each line will be preceded by repeated copies of Indent, where the + // number of copies is the current indentation depth. 
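+ // For example, an Indent of two spaces yields output in the style of
+ // json.MarshalIndent(v, "", "  ").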
+ Indent string + + // AnyResolver is used to resolve the google.protobuf.Any well-known type. + // If unset, the global registry is used by default. + AnyResolver AnyResolver +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should also +// implement JSONPBUnmarshaler so that the custom format can be parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +// +// Deprecated: Custom types should implement protobuf reflection instead. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// Marshal serializes a protobuf message as JSON into w. +func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error { + b, err := jm.marshal(m) + if len(b) > 0 { + if _, err := w.Write(b); err != nil { + return err + } + } + return err +} + +// MarshalToString serializes a protobuf message as JSON in string form. +func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) { + b, err := jm.marshal(m) + if err != nil { + return "", err + } + return string(b), nil +} + +func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) { + v := reflect.ValueOf(m) + if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, errors.New("Marshal called with nil") + } + + // Check for custom marshalers first since they may not properly + // implement protobuf reflection that the logic below relies on. + if jsm, ok := m.(JSONPBMarshaler); ok { + return jsm.MarshalJSONPB(jm) + } + + if wrapJSONMarshalV2 { + opts := protojson.MarshalOptions{ + UseProtoNames: jm.OrigName, + UseEnumNumbers: jm.EnumsAsInts, + EmitUnpopulated: jm.EmitDefaults, + Indent: jm.Indent, + } + if jm.AnyResolver != nil { + opts.Resolver = anyResolver{jm.AnyResolver} + } + return opts.Marshal(proto.MessageReflect(m).Interface()) + } else { + // Check for unpopulated required fields first. + m2 := proto.MessageReflect(m) + if err := protoV2.CheckInitialized(m2.Interface()); err != nil { + return nil, err + } + + w := jsonWriter{Marshaler: jm} + err := w.marshalMessage(m2, "", "") + return w.buf, err + } +} + +type jsonWriter struct { + *Marshaler + buf []byte +} + +func (w *jsonWriter) write(s string) { + w.buf = append(w.buf, s...) +} + +func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error { + if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(w.Marshaler) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + w.write(string(b)) + return nil + } + + md := m.Descriptor() + fds := md.Fields() + + // Handle well-known types. 
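+ // (Each case below bypasses the generic object syntax: wrapper types
+ // collapse to their scalar value, while Duration and Timestamp render as
+ // strings such as "1.500s" and "1972-01-01T10:00:20.021Z", per the
+ // proto3 JSON mapping.)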
+ const secondInNanos = int64(time.Second / time.Nanosecond) + switch wellKnownType(md.FullName()) { + case "Any": + return w.marshalAny(m, indent) + case "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue": + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + case "Duration": + const maxSecondsInDuration = 315576000000 + // "Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + var sign string + if s < 0 || ns < 0 { + sign, s, ns = "-", -1*s, -1*ns + } + x := fmt.Sprintf("%s%d.%09d", sign, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vs"`, x)) + return nil + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s := m.Get(fds.ByNumber(1)).Int() + ns := m.Get(fds.ByNumber(2)).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + w.write(fmt.Sprintf(`"%vZ"`, x)) + return nil + case "Value": + // JSON value; which is a null, number, string, bool, object, or array. + od := md.Oneofs().Get(0) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("nil Value") + } + return w.marshalValue(fd, m.Get(fd), indent) + case "Struct", "ListValue": + // JSON object or array. + fd := fds.ByNumber(1) + return w.marshalValue(fd, m.Get(fd), indent) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + + firstField := true + if typeURL != "" { + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + if fd == nil { + continue + } + } else { + i++ + } + + v := m.Get(fd) + + if !m.Has(fd) { + if !w.EmitDefaults || fd.ContainingOneof() != nil { + continue + } + if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) { + v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars + } + } + + if !firstField { + w.writeComma() + } + if err := w.marshalField(fd, v, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if md.ExtensionRanges().Len() > 0 { + // Collect a sorted list of all extension descriptor and values. 
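+ // (Sorting by extension field number below keeps the emitted JSON
+ // deterministic from one marshal to the next.)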
+ type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + if !firstField { + w.writeComma() + } + if err := w.marshalField(ext.desc, ext.val, indent); err != nil { + return err + } + firstField = false + } + } + + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) writeComma() { + if w.Indent != "" { + w.write(",\n") + } else { + w.write(",") + } +} + +func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + md := m.Descriptor() + typeURL := m.Get(md.Fields().ByNumber(1)).String() + rawVal := m.Get(md.Fields().ByNumber(2)).Bytes() + + var m2 protoreflect.Message + if w.AnyResolver != nil { + mi, err := w.AnyResolver.Resolve(typeURL) + if err != nil { + return err + } + m2 = proto.MessageReflect(mi) + } else { + mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL) + if err != nil { + return err + } + m2 = mt.New() + } + + if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil { + return err + } + + if wellKnownType(m2.Descriptor().FullName()) == "" { + return w.marshalMessage(m2, indent, typeURL) + } + + w.write("{") + if w.Indent != "" { + w.write("\n") + } + if err := w.marshalTypeURL(indent, typeURL); err != nil { + return err + } + w.writeComma() + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + w.write(`"value": `) + } else { + w.write(`"value":`) + } + if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil { + return err + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + } + w.write("}") + return nil +} + +func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"@type":`) + if w.Indent != "" { + w.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + w.write(string(b)) + return nil +} + +// marshalField writes field description and value to the Writer. +func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + if w.Indent != "" { + w.write(indent) + w.write(w.Indent) + } + w.write(`"`) + switch { + case fd.IsExtension(): + // For message set, use the fname of the message as the extension name. 
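+ // (e.g. a hypothetical extension "my.pkg.MyMsg.message_set_extension"
+ // is written with the JSON key "[my.pkg.MyMsg]".)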
+ name := string(fd.FullName()) + if isMessageSet(fd.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + w.write("[" + name + "]") + case w.OrigName: + name := string(fd.Name()) + if fd.Kind() == protoreflect.GroupKind { + name = string(fd.Message().Name()) + } + w.write(name) + default: + w.write(string(fd.JSONName())) + } + w.write(`":`) + if w.Indent != "" { + w.write(" ") + } + return w.marshalValue(fd, v, indent) +} + +func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case fd.IsList(): + w.write("[") + comma := "" + lv := v.List() + for i := 0; i < lv.Len(); i++ { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write("]") + return nil + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := v.Map() + + // Collect a sorted list of all map keys and values. + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + + w.write(`{`) + comma := "" + for _, entry := range entries { + w.write(comma) + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + w.write(w.Indent) + } + + s := fmt.Sprint(entry.key.Interface()) + b, err := json.Marshal(s) + if err != nil { + return err + } + w.write(string(b)) + + w.write(`:`) + if w.Indent != "" { + w.write(` `) + } + + if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil { + return err + } + comma = "," + } + if w.Indent != "" { + w.write("\n") + w.write(indent) + w.write(w.Indent) + } + w.write(`}`) + return nil + default: + return w.marshalSingularValue(fd, v, indent) + } +} + +func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error { + switch { + case !v.IsValid(): + w.write("null") + return nil + case fd.Message() != nil: + return w.marshalMessage(v.Message(), indent+w.Indent, "") + case fd.Enum() != nil: + if fd.Enum().FullName() == "google.protobuf.NullValue" { + w.write("null") + return nil + } + + vd := fd.Enum().Values().ByNumber(v.Enum()) + if vd == nil || w.EnumsAsInts { + w.write(strconv.Itoa(int(v.Enum()))) + } else { + w.write(`"` + string(vd.Name()) + `"`) + } + return nil + default: + switch v.Interface().(type) { + case float32, float64: + switch { + case math.IsInf(v.Float(), +1): + w.write(`"Infinity"`) + return nil + case math.IsInf(v.Float(), -1): + w.write(`"-Infinity"`) + return nil + case math.IsNaN(v.Float()): + w.write(`"NaN"`) + 
return nil + } + case int64, uint64: + w.write(fmt.Sprintf(`"%d"`, v.Interface())) + return nil + } + + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + w.write(string(b)) + return nil + } +} diff --git a/vendor/github.com/golang/protobuf/jsonpb/json.go b/vendor/github.com/golang/protobuf/jsonpb/json.go new file mode 100644 index 0000000000..480e2448de --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/json.go @@ -0,0 +1,69 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jsonpb provides functionality to marshal and unmarshal between a +// protocol buffer message and JSON. It follows the specification at +// https://developers.google.com/protocol-buffers/docs/proto3#json. +// +// Do not rely on the default behavior of the standard encoding/json package +// when called on generated message types as it does not operate correctly. +// +// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson" +// package instead. +package jsonpb + +import ( + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// AnyResolver takes a type URL, present in an Any message, +// and resolves it into an instance of the associated message. +type AnyResolver interface { + Resolve(typeURL string) (proto.Message, error) +} + +type anyResolver struct{ AnyResolver } + +func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + return r.FindMessageByURL(string(message)) +} + +func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) { + m, err := r.Resolve(url) + if err != nil { + return nil, err + } + return protoimpl.X.MessageTypeOf(m), nil +} + +func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByName(field) +} + +func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) +} + +func wellKnownType(s protoreflect.FullName) string { + if s.Parent() == "google.protobuf" { + switch s.Name() { + case "Empty", "Any", + "BoolValue", "BytesValue", "StringValue", + "Int32Value", "UInt32Value", "FloatValue", + "Int64Value", "UInt64Value", "DoubleValue", + "Duration", "Timestamp", + "NullValue", "Struct", "Value", "ListValue": + return string(s.Name()) + } + } + return "" +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 0000000000..dc5225b6d4 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. 
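+//
+// A minimal in-package sketch of the pieces defined below (hypothetical
+// usage; the types are unexported, so this only compiles inside the
+// package):
+//
+//	ts := new(timeSeries)
+//	ts.init(timeSeriesResolutions, NewFloat, timeSeriesNumBuckets, defaultClockInstance)
+//	f := Float(1)
+//	ts.AddWithTime(&f, time.Now())
+//	total := ts.Total() // aggregate of everything observed so far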
+package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. 
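+// For example, with 64 buckets at the 1-second resolution the finest
+// level covers roughly the last minute, while the coarsest 16-week level
+// spans on the order of twenty years.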
+type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. 
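+		// (Worked example, added for clarity: a level with size = 10s and
+		// 64 buckets spans 640s, so if t is more than 640s past level.end
+		// no existing bucket can survive; the level is cleared and end is
+		// snapped down to a multiple of the bucket size.)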
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. 
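+//
+// For example (a usage sketch):
+//
+//	now := ts.clock.Time()
+//	perMinute := ts.ComputeRange(now.Add(-time.Hour), now, 60)
+//	// perMinute[i] approximates the observations in minute i.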
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := int(dstStart.Sub(srcStart) / srcInterval) + srcIndex += advance + srcStart = srcStart.Add(time.Duration(advance) * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. 
+func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 0000000000..c646a6952e --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
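+// Less compares Start times with After, so sort.Sort(els) yields
+// newest-first order.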
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
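+		// (So after the first replacement the oldest visible entry reads
+		// "(2 events discarded)", and the count grows by one per further
+		// discard.)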
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

+<h1>/debug/events</h1>
+
+<table id="req-table">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{range $j, $bucket := $.Buckets}}
+		{{$n := index $.Counts $i $j}}
+		<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}">{{end}}
+			[{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+		{{end}}
+	</tr>
+	{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr align="left">
+		<th>When</th>
+		<th>Elapsed</th>
+	</tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}</td>
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 0000000000..d6c71101e4 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// addMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
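+// Bars are scaled linearly so that the fullest bucket is rendered at
+// exactly this width; see newData below.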
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
+<table>
+<tr>
+	<td style="padding:0.25em">Count: {{.Count}}</td>
+	<td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+	<td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+	<td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+  <tr>
+    <td style="padding:0 0 0 0.25em">[{{.Lower}},</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+    <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+    <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
+  </tr>
+{{end}}
+{{end}}
+</table>
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 0000000000..eae2a99f54 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
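+	// A program that fronts these pages with its own authentication can
+	// replace this hook entirely; for example (a sketch, not part of the
+	// vendored code):
+	//
+	//	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
+	//		return true, false // anyone may view; sensitive events stay hidden
+	//	}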
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
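+		// (For example, /debug/requests?fam=mypkg.Foo&b=0&exp=1 renders
+		// the first completed-trace bucket of the hypothetical family
+		// "mypkg.Foo" in expanded form; b=-1 selects active traces. See
+		// parseArgs below.)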
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
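+	//
+	// For example (a sketch; bufPool is a hypothetical sync.Pool of
+	// *bytes.Buffer):
+	//
+	//	tr.SetRecycler(func(x interface{}) {
+	//		if b, ok := x.(*bytes.Buffer); ok {
+	//			bufPool.Put(b)
+	//		}
+	//	})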
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Since(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
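+		// (For example, with tracesPerBucket = 10, start = 3 and the
+		// bucket full, i wraps to 3: the oldest trace at buf[3] is
+		// released and start advances to 4.)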
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
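+	// (Hence the limit only takes effect when m exceeds 3 and no events
+	// have been recorded yet.)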
+	if len(tr.events) == 0 && m > 3 {
+		tr.maxEvents = m
+	}
+	tr.mu.Unlock()
+}
+
+func (tr *trace) ref() {
+	atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+	if atomic.AddInt32(&tr.refs, -1) == 0 {
+		tr.mu.RLock()
+		if tr.recycler != nil {
+			// freeTrace clears tr, so we hold tr.recycler and tr.events here.
+			go func(f func(interface{}), es []event) {
+				for _, e := range es {
+					if e.Recyclable {
+						f(e.What)
+					}
+				}
+			}(tr.recycler, tr.events)
+		}
+		tr.mu.RUnlock()
+
+		freeTrace(tr)
+	}
+}
+
+func (tr *trace) When() string {
+	return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+	tr.mu.RLock()
+	t := tr.Elapsed
+	tr.mu.RUnlock()
+
+	if t == 0 {
+		// Active trace.
+		t = time.Since(tr.Start)
+	}
+	return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+	tr.mu.RLock()
+	defer tr.mu.RUnlock()
+	return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+	select {
+	case tr := <-traceFreeList:
+		return tr
+	default:
+		return new(trace)
+	}
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+	if DebugUseAfterFinish {
+		return // never reuse
+	}
+	tr.reset()
+	select {
+	case traceFreeList <- tr:
+	default:
+	}
+}
+
+func elapsed(d time.Duration) string {
+	b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+	// For subsecond durations, blank all zeros before decimal point,
+	// and all zeros between the decimal point and the first non-zero digit.
+	if d < time.Second {
+		dot := bytes.IndexByte(b, '.')
+		for i := 0; i < dot; i++ {
+			b[i] = ' '
+		}
+		for i := dot + 1; i < len(b); i++ {
+			if b[i] == '0' {
+				b[i] = ' '
+			} else {
+				break
+			}
+		}
+	}
+
+	return string(b)
+}
+
+var pageTmplCache *template.Template
+var pageTmplOnce sync.Once
+
+func pageTmpl() *template.Template {
+	pageTmplOnce.Do(func() {
+		pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{
+			"elapsed": elapsed,
+			"add":     func(a, b int) int { return a + b },
+		}).Parse(pageHTML))
+	})
+	return pageTmplCache
+}
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+	<head>
+	<title>/debug/requests</title>
+	<style type="text/css">
+		body {
+			font-family: sans-serif;
+		}
+		table#tr-status td.family {
+			padding-right: 2em;
+		}
+		table#tr-status td.active {
+			padding-right: 1em;
+		}
+		table#tr-status td.latency-first {
+			padding-left: 1em;
+		}
+		table#tr-status td.empty {
+			color: #aaa;
+		}
+		table#reqs {
+			margin-top: 1em;
+		}
+		table#reqs tr.first {
+			{{if $.Expanded}}font-weight: bold;{{end}}
+		}
+		table#reqs td {
+			font-family: monospace;
+		}
+		table#reqs td.when {
+			text-align: right;
+			white-space: nowrap;
+		}
+		table#reqs td.elapsed {
+			padding: 0 0.5em;
+			text-align: right;
+			white-space: pre;
+			width: 10em;
+		}
+	</style>
+	</head>
+	<body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+	{{range $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{$n := index $.ActiveTraceCount $fam}}
+		<td class="active {{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+			[{{$n}} active]
+			{{if $n}}</a>{{end}}
+		</td>
+
+		{{$f := index $.CompletedTraces $fam}}
+		{{range $i, $b := $f.Buckets}}
+		{{$empty := $b.Empty}}
+		<td {{if $empty}}class="empty"{{end}}>
+		{{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+		[{{.Cond}}]
+		{{if not $empty}}</a>{{end}}
+		</td>
+		{{end}}
+
+		{{$nb := len $f.Buckets}}
+		<td class="latency-first">
+		<a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
+		</td>
+		<td>
+		<a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
+		</td>
+		<td>
+		<a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
+		</td>
+	</tr>
+	{{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+  [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+  <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+  [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+	{{if or $.Expanded (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+	{{else}}
+	[Traced/Summary]
+	{{end}}
+	{{if or (not $.Expanded) (not $.Traced)}}
+	<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+	{{else}}
+	[Traced/Expanded]
+	{{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
+{{end}}
+
+<table id="reqs">
+	<caption>
+		{{if $.Active}}Active{{else}}Completed{{end}} Requests
+	</caption>
+	<tr><th>When</th><th>Elapsed (s)</th></tr>
+	{{range $tr := $.Traces}}
+	<tr class="first">
+		<td class="when">{{$tr.When}}</td>
+		<td class="elapsed">{{$tr.ElapsedTime}}</td>
+		<td>{{$tr.Title}}</td>
+	</tr>
+	{{/* TODO: include traceID/spanID */}}
+	{{if $.Expanded}}
+	{{range $tr.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 0000000000..a6b5081888 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,203 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.9 +// source: google/rpc/status.proto + +package status + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The `Status` type defines a logical error model that is suitable for +// different programming environments, including REST APIs and RPC APIs. It is +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. +// +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
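+	//
+	// Server code normally constructs these values through the
+	// google.golang.org/grpc/status helpers rather than by filling in this
+	// struct directly; a minimal sketch (the message text is illustrative):
+	//
+	//	st := status.New(codes.NotFound, "resource not found")
+	//	return nil, st.Err()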
+ Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_google_rpc_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_google_rpc_status_proto protoreflect.FileDescriptor + +var file_google_rpc_status_proto_rawDesc = []byte{ + 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_status_proto_rawDescOnce sync.Once + file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc +) + +func file_google_rpc_status_proto_rawDescGZIP() []byte { + file_google_rpc_status_proto_rawDescOnce.Do(func() { + file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData) + }) + return file_google_rpc_status_proto_rawDescData +} + +var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var 
file_google_rpc_status_proto_goTypes = []interface{}{
+	(*Status)(nil),    // 0: google.rpc.Status
+	(*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_google_rpc_status_proto_depIdxs = []int32{
+	1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any
+	1, // [1:1] is the sub-list for method output_type
+	1, // [1:1] is the sub-list for method input_type
+	1, // [1:1] is the sub-list for extension type_name
+	1, // [1:1] is the sub-list for extension extendee
+	0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_status_proto_init() }
+func file_google_rpc_status_proto_init() {
+	if File_google_rpc_status_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Status); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_rpc_status_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   1,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_rpc_status_proto_goTypes,
+		DependencyIndexes: file_google_rpc_status_proto_depIdxs,
+		MessageInfos:      file_google_rpc_status_proto_msgTypes,
+	}.Build()
+	File_google_rpc_status_proto = out.File
+	file_google_rpc_status_proto_rawDesc = nil
+	file_google_rpc_status_proto_goTypes = nil
+	file_google_rpc_status_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS
new file mode 100644
index 0000000000..e491a9e7f7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
diff --git a/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
new file mode 100644
index 0000000000..9d4213ebca
--- /dev/null
+++ b/vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
@@ -0,0 +1,3 @@
+## Community Code of Conduct
+
+gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
new file mode 100644
index 0000000000..608aa6e1ac
--- /dev/null
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -0,0 +1,73 @@
+# How to contribute
+
+We definitely welcome your patches and contributions to gRPC! Please read the gRPC
+organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
+and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
+
+If you are new to GitHub, please start by reading the [Pull Request howto](https://help.github.com/articles/about-pull-requests/).
+
+## Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
+
+## Guidelines for Pull Requests
+How to get your contributions merged smoothly and quickly.
+
+- Create **small PRs** that are narrowly focused on **addressing a single
+  concern**. We often receive PRs that try to fix several things at once; when
+  only one of those fixes is acceptable, nothing gets merged, and both the
+  author's and the reviewers' time is wasted. Create more PRs to address
+  different concerns and everyone will be happy.
+
+- If you are searching for features to work on, issues labeled [Status: Help
+  Wanted](https://github.com/grpc/grpc-go/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc+label%3A%22Status%3A+Help+Wanted%22)
+  are a great place to start. These issues are well-documented and usually can be
+  resolved with a single pull request.
+
+- If you are adding a new file, make sure it has the copyright message template
+  at the top as a comment. You can copy over the message from an existing file
+  and update the year.
+
+- The grpc package should only depend on standard Go packages and a small number
+  of exceptions. If your contribution introduces new dependencies which are NOT
+  in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
+  discussion with gRPC-Go authors and consultants.
+
+- For speculative changes, consider opening an issue and discussing it first. If
+  you are suggesting a behavioral or API change, consider starting with a [gRFC
+  proposal](https://github.com/grpc/proposal).
+
+- Provide a good **PR description** as a record of **what** change is being made
+  and **why** it was made. Link to a GitHub issue if it exists.
+
+- If you want to fix formatting or style, consider whether your changes are an
+  obvious improvement or might be considered a personal preference. If a style
+  change is based on preference, it likely will not be accepted. If it corrects
+  widely agreed-upon anti-patterns, then please do create a PR and explain the
+  benefits of the change.
+
+- Unless your PR is trivial, you should expect there will be reviewer comments
+  that you'll need to address before merging. We'll mark it as `Status: Requires
+  Reporter Clarification` if we expect you to respond to these comments in a
+  timely manner. If the PR remains inactive for 6 days, it will be marked as
+  `stale`, and it will be automatically closed 7 days after that if we don't
+  hear back from you.
+
+- Maintain a **clean commit history** and use **meaningful commit messages**. PRs
+  with messy commit history are difficult to review and won't be merged. Use
+  `rebase -i upstream/master` to curate your commit history and/or to bring in
+  the latest changes from master (but avoid rebasing in the middle of a code
+  review).
+
+- Keep your PR up to date with upstream/master (if there are merge conflicts, we
+  can't really merge your change).
+
+- **All tests need to be passing** before your change can be merged. We
+  recommend you **run tests locally** before creating your PR to catch breakages
+  early on.
+  - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors
+  - `go test -cpu 1,4 -timeout 7m ./...` to run the tests
+  - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode
+
+- Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/vendor/google.golang.org/grpc/GOVERNANCE.md b/vendor/google.golang.org/grpc/GOVERNANCE.md
new file mode 100644
index 0000000000..d6ff267471
--- /dev/null
+++ b/vendor/google.golang.org/grpc/GOVERNANCE.md
@@ -0,0 +1 @@
+This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md).
diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/google.golang.org/grpc/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md new file mode 100644 index 0000000000..c6672c0a3e --- /dev/null +++ b/vendor/google.golang.org/grpc/MAINTAINERS.md @@ -0,0 +1,28 @@ +This page lists all active maintainers of this repository. If you were a +maintainer and would like to add your name to the Emeritus list, please send us a +PR. + +See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md) +for governance guidelines and how to become a maintainer. +See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) +for general contribution guidelines. + +## Maintainers (in alphabetical order) + +- [cesarghali](https://github.com/cesarghali), Google LLC +- [dfawley](https://github.com/dfawley), Google LLC +- [easwars](https://github.com/easwars), Google LLC +- [menghanl](https://github.com/menghanl), Google LLC +- [srini100](https://github.com/srini100), Google LLC + +## Emeritus Maintainers (in alphabetical order) +- [adelez](https://github.com/adelez), Google LLC +- [canguler](https://github.com/canguler), Google LLC +- [iamqizhao](https://github.com/iamqizhao), Google LLC +- [jadekler](https://github.com/jadekler), Google LLC +- [jtattermusch](https://github.com/jtattermusch), Google LLC +- [lyuxuan](https://github.com/lyuxuan), Google LLC +- [makmukhi](https://github.com/makmukhi), Google LLC +- [matt-kwong](https://github.com/matt-kwong), Google LLC +- [nicolasnoble](https://github.com/nicolasnoble), Google LLC +- [yongni](https://github.com/yongni), Google LLC diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 0000000000..1f8960922b --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,46 @@ +all: vet test testrace + +build: + go build google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +deps: + GO111MODULE=on go get -d -v google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +test: + go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testsubmodule: + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/... + +testrace: + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +testdeps: + GO111MODULE=on go get -d -v -t google.golang.org/grpc/... + +vet: vetdeps + ./vet.sh + +vetdeps: + ./vet.sh -install + +.PHONY: \ + all \ + build \ + clean \ + proto \ + test \ + testrace \ + vet \ + vetdeps diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt new file mode 100644 index 0000000000..530197749e --- /dev/null +++ b/vendor/google.golang.org/grpc/NOTICE.txt @@ -0,0 +1,13 @@ +Copyright 2014 gRPC authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
new file mode 100644
index 0000000000..1bc92248cb
--- /dev/null
+++ b/vendor/google.golang.org/grpc/README.md
@@ -0,0 +1,107 @@
+# gRPC-Go
+
+[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
+[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API]
+[![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
+
+The [Go][] implementation of [gRPC][]: A high performance, open source, general
+RPC framework that puts mobile and HTTP/2 first. For more information see the
+[Go gRPC docs][], or jump directly into the [quick start][].
+
+## Prerequisites
+
+- **[Go][]**: any one of the **three latest major** [releases][go-releases].
+
+## Installation
+
+Simply add the following import to your code, and then `go [build|run|test]`
+will automatically fetch the necessary dependencies:
+
+
+```go
+import "google.golang.org/grpc"
+```
+
+> **Note:** If you are trying to access `grpc-go` from **China**, see the
+> [FAQ](#FAQ) below.
+
+## Learn more
+
+- [Go gRPC docs][], which include a [quick start][] and [API
+  reference][API] among other resources
+- [Low-level technical docs](Documentation) from this repository
+- [Performance benchmark][]
+- [Examples](examples)
+
+## FAQ
+
+### I/O Timeout Errors
+
+The `golang.org` domain may be blocked in some countries. `go get` usually
+produces an error like the following when this happens:
+
+```console
+$ go get -u google.golang.org/grpc
+package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
+```
+
+To build Go code, there are several options:
+
+- Set up a VPN and access google.golang.org through that.
+
+- With Go module support: it is possible to use the `replace` feature of `go
+  mod` to create aliases for golang.org packages. In your project's directory:
+
+  ```sh
+  go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
+  go mod tidy
+  go mod vendor
+  go build -mod=vendor
+  ```
+
+  Note that this will also need to be done for all transitive dependencies
+  hosted on golang.org. For details, refer to [golang/go issue
+  #28652](https://github.com/golang/go/issues/28652).
+
+### Compiling error, undefined: grpc.SupportPackageIsVersion
+
+Please update to the latest version of gRPC-Go using
+`go get google.golang.org/grpc`.
+
+### How to turn on logging
+
+The default logger is controlled by environment variables. Turn everything on
+like this:
+
+```console
+$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
+$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
+```
+
+### The RPC failed with error `"code = Unavailable desc = transport is closing"`
+
+This error means the connection the RPC is using was closed, and there are many
+possible reasons, including:
+ 1. mis-configured transport credentials (the connection fails during handshaking)
+ 1. bytes disrupted, possibly by a proxy in between
+ 1. server shutdown
+ 1. Keepalive parameters caused connection shutdown, for example if you have
+    configured your server to terminate connections regularly to [trigger DNS
+    lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
+    If this is the case, you may want to increase your
+    [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters)
+    to allow longer RPC calls to finish.
+
+It can be tricky to debug this because the error happens on the client side but
+the root cause of the connection being closed is on the server side. Turn on
+logging on __both client and server__, and see if there are any transport
+errors.
+
+[API]: https://pkg.go.dev/google.golang.org/grpc
+[Go]: https://golang.org
+[Go module]: https://github.com/golang/go/wiki/Modules
+[gRPC]: https://grpc.io
+[Go gRPC docs]: https://grpc.io/docs/languages/go
+[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608
+[quick start]: https://grpc.io/docs/languages/go/quickstart
+[go-releases]: https://golang.org/doc/devel/release.html
diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
new file mode 100644
index 0000000000..be6e108705
--- /dev/null
+++ b/vendor/google.golang.org/grpc/SECURITY.md
@@ -0,0 +1,3 @@
+# Security Policy
+
+For information on the gRPC Security Policy and reporting potential security issues, please see the [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
new file mode 100644
index 0000000000..712fef4d0f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package attributes defines a generic key/value store used in various gRPC
+// components.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package attributes
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Attributes is an immutable struct for storing and retrieving generic
+// key/value pairs. Keys must be hashable, and users should define their own
+// types for keys. Values should not be modified after they are added to an
+// Attributes or if they were received from one. If values implement 'Equal(o
+// any) bool', it will be called by (*Attributes).Equal to determine whether
+// two values with the same key should be considered equal.
+type Attributes struct {
+	m map[any]any
+}
+
+// New returns a new Attributes containing the key/value pair.
+func New(key, value any) *Attributes {
+	return &Attributes{m: map[any]any{key: value}}
+}
+
+// WithValue returns a new Attributes containing the previous keys and values
+// and the new key/value pair. If the same key appears multiple times, the
+// last value overwrites all previous values for that key. To remove an
+// existing key, use a nil value. value should not be modified later.
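+//
+// A minimal usage sketch (the key type is illustrative; callers define their
+// own unexported key types):
+//
+//	type pingKey struct{}
+//
+//	a := attributes.New(pingKey{}, 1)
+//	a = a.WithValue(pingKey{}, 2) // replaces the value stored under pingKey{}
+//	v := a.Value(pingKey{})       // v == 2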
+func (a *Attributes) WithValue(key, value any) *Attributes {
+	if a == nil {
+		return New(key, value)
+	}
+	n := &Attributes{m: make(map[any]any, len(a.m)+1)}
+	for k, v := range a.m {
+		n.m[k] = v
+	}
+	n.m[key] = value
+	return n
+}
+
+// Value returns the value associated with these attributes for key, or nil if
+// no value is associated with key. The returned value should not be modified.
+func (a *Attributes) Value(key any) any {
+	if a == nil {
+		return nil
+	}
+	return a.m[key]
+}
+
+// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is
+// implemented for a value in the attributes, it is called to determine if the
+// value matches the one stored in the other attributes. If Equal is not
+// implemented, standard equality is used to determine if the two values are
+// equal. Note that some types (e.g. maps) aren't comparable by default, so
+// they must be wrapped in a struct, or in an alias type, with Equal defined.
+func (a *Attributes) Equal(o *Attributes) bool {
+	if a == nil && o == nil {
+		return true
+	}
+	if a == nil || o == nil {
+		return false
+	}
+	if len(a.m) != len(o.m) {
+		return false
+	}
+	for k, v := range a.m {
+		ov, ok := o.m[k]
+		if !ok {
+			// o missing element of a
+			return false
+		}
+		if eq, ok := v.(interface{ Equal(o any) bool }); ok {
+			if !eq.Equal(ov) {
+				return false
+			}
+		} else if v != ov {
+			// Fall back to a standard equality check if Equal is unimplemented.
+			return false
+		}
+	}
+	return true
+}
+
+// String prints the attribute map. If any keys or values throughout the map
+// implement fmt.Stringer, that method is called to format them.
+func (a *Attributes) String() string {
+	var sb strings.Builder
+	sb.WriteString("{")
+	first := true
+	for k, v := range a.m {
+		if !first {
+			sb.WriteString(", ")
+		}
+		sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
+		first = false
+	}
+	sb.WriteString("}")
+	return sb.String()
+}
+
+func str(x any) string {
+	if v, ok := x.(fmt.Stringer); ok {
+		return v.String()
+	} else if v, ok := x.(string); ok {
+		return v
+	}
+	return fmt.Sprintf("<%p>", x)
+}
+
+// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
+// the Attributes correctly when printing (via pretty.JSON) structs containing
+// Attributes as fields.
+//
+// It is impossible to unmarshal attributes from a JSON representation, and
+// this method is meant only for debugging purposes.
+func (a *Attributes) MarshalJSON() ([]byte, error) {
+	return []byte(a.String()), nil
+}
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
new file mode 100644
index 0000000000..29475e31c9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -0,0 +1,61 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// See internal/backoff package for the backoff implementation. This file is
+// kept for the exported types and API backward compatibility.
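+//
+// A typical dial-time use of these types looks like the following sketch (the
+// target address is hypothetical):
+//
+//	conn, err := grpc.Dial("localhost:50051", grpc.WithConnectParams(grpc.ConnectParams{
+//		Backoff:           backoff.DefaultConfig,
+//		MinConnectTimeout: 20 * time.Second,
+//	}))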
+
+package grpc
+
+import (
+	"time"
+
+	"google.golang.org/grpc/backoff"
+)
+
+// DefaultBackoffConfig uses values specified for backoff in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
+var DefaultBackoffConfig = BackoffConfig{
+	MaxDelay: 120 * time.Second,
+}
+
+// BackoffConfig defines the parameters for the default gRPC backoff strategy.
+//
+// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
+type BackoffConfig struct {
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// ConnectParams defines the parameters for connecting and retrying. Users are
+// encouraged to use this instead of the BackoffConfig type defined above. See
+// here for more details:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ConnectParams struct {
+	// Backoff specifies the configuration options for connection backoff.
+	Backoff backoff.Config
+	// MinConnectTimeout is the minimum amount of time we are willing to give a
+	// connection to complete.
+	MinConnectTimeout time.Duration
+}
diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
new file mode 100644
index 0000000000..0787d0b50c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff provides configuration options for backoff.
+//
+// More details can be found at:
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// All APIs in this package are experimental.
+package backoff
+
+import "time"
+
+// Config defines the configuration options for backoff.
+type Config struct {
+	// BaseDelay is the amount of time to backoff after the first failure.
+	BaseDelay time.Duration
+	// Multiplier is the factor with which to multiply backoffs after a
+	// failed retry. Should ideally be greater than 1.
+	Multiplier float64
+	// Jitter is the factor with which backoffs are randomized.
+	Jitter float64
+	// MaxDelay is the upper bound of backoff delay.
+	MaxDelay time.Duration
+}
+
+// DefaultConfig is a backoff configuration with the default values specified
+// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+//
+// This should be useful for callers who want to configure backoff with
+// non-default values only for a subset of the options.
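+//
+// For example (a sketch; the chosen ceiling is illustrative):
+//
+//	cfg := backoff.DefaultConfig
+//	cfg.MaxDelay = 30 * time.Second // override one option, keep the rest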
+var DefaultConfig = Config{
+	BaseDelay:  1.0 * time.Second,
+	Multiplier: 1.6,
+	Jitter:     0.2,
+	MaxDelay:   120 * time.Second,
+}
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
new file mode 100644
index 0000000000..b6377f445a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -0,0 +1,427 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package balancer defines APIs for load balancing in gRPC.
+// All APIs in this package are experimental.
+package balancer
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"net"
+	"strings"
+
+	"google.golang.org/grpc/channelz"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// m is a map from name to balancer builder.
+	m = make(map[string]Builder)
+)
+
+// Register registers the balancer builder to the balancer map. b.Name
+// (lowercased) will be used as the name registered with this builder. If the
+// Builder implements ConfigParser, ParseConfig will be called when new service
+// configs are received by the resolver, and the result will be provided to the
+// Balancer in UpdateClientConnState.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Balancers are
+// registered with the same name, the one registered last will take effect.
+func Register(b Builder) {
+	m[strings.ToLower(b.Name())] = b
+}
+
+// unregisterForTesting deletes the balancer with the given name from the
+// balancer map.
+//
+// This function is not thread-safe.
+func unregisterForTesting(name string) {
+	delete(m, name)
+}
+
+func init() {
+	internal.BalancerUnregister = unregisterForTesting
+}
+
+// Get returns the balancer builder registered with the given name.
+// Note that the comparison is done in a case-insensitive fashion.
+// If no builder is registered with the name, nil will be returned.
+func Get(name string) Builder {
+	if b, ok := m[strings.ToLower(name)]; ok {
+		return b
+	}
+	return nil
+}
+
+// A SubConn represents a single connection to a gRPC backend service.
+//
+// Each SubConn contains a list of addresses.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger the
+// connecting, Balancers must call Connect. If a connection re-enters IDLE,
+// Balancers must call Connect again to trigger a new connection attempt.
+//
+// gRPC will try to connect to the addresses in sequence, and stop trying the
+// remainder once the first connection is successful. If an attempt to connect
+// to all addresses encounters an error, the SubConn will enter
+// TRANSIENT_FAILURE for a backoff period, and then transition to IDLE.
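+//
+// (In other words, the per-connection lifecycle is roughly
+// IDLE -> CONNECTING -> READY or TRANSIENT_FAILURE -> IDLE, with each new
+// cycle started by a Connect call.)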
+//
+// Once established, if a connection is lost, the SubConn will transition
+// directly to IDLE.
+//
+// This interface is to be implemented by gRPC. Users should not need their own
+// implementation of this interface. For situations like testing, any
+// implementations should embed this interface. This allows gRPC to add new
+// methods to this interface.
+type SubConn interface {
+	// UpdateAddresses updates the addresses used in this SubConn.
+	// gRPC checks if the currently-connected address is still in the new list.
+	// If it's in the list, the connection will be kept.
+	// If it's not in the list, the connection will be gracefully closed, and
+	// a new connection will be created.
+	//
+	// This will trigger a state transition for the SubConn.
+	//
+	// Deprecated: this method will be removed. Create new SubConns for new
+	// addresses instead.
+	UpdateAddresses([]resolver.Address)
+	// Connect starts the connecting for this SubConn.
+	Connect()
+	// GetOrBuildProducer returns a reference to the existing Producer for this
+	// ProducerBuilder in this SubConn, or, if one does not currently exist,
+	// creates a new one and returns it. Returns a close function which must
+	// be called when the Producer is no longer needed.
+	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
+	// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
+	// allowed to complete. No future calls should be made on the SubConn.
+	// One final state update will be delivered to the StateListener (or
+	// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
+	// indicate the shutdown operation. This may be delivered before
+	// in-progress RPCs are complete and the actual connection is closed.
+	Shutdown()
+}
+
+// NewSubConnOptions contains options to create a new SubConn.
+type NewSubConnOptions struct {
+	// CredsBundle is the credentials bundle that will be used in the created
+	// SubConn. If it's nil, the original creds from grpc DialOptions will be
+	// used.
+	//
+	// Deprecated: Use the Attributes field in resolver.Address to pass
+	// arbitrary data to the credential handshaker.
+	CredsBundle credentials.Bundle
+	// HealthCheckEnabled indicates whether health check service should be
+	// enabled on this SubConn.
+	HealthCheckEnabled bool
+	// StateListener is called when the state of the subconn changes. If nil,
+	// Balancer.UpdateSubConnState will be called instead. Will never be
+	// invoked until after Connect() is called on the SubConn created with
+	// these options.
+	StateListener func(SubConnState)
+}
+
+// State contains the balancer's state relevant to the gRPC ClientConn.
+type State struct {
+	// State contains the connectivity state of the balancer, which is used to
+	// determine the state of the ClientConn.
+	ConnectivityState connectivity.State
+	// Picker is used to choose connections (SubConns) for RPCs.
+	Picker Picker
+}
+
+// ClientConn represents a gRPC ClientConn.
+//
+// This interface is to be implemented by gRPC. Users should not need a
+// brand new implementation of this interface. For situations like
+// testing, the new implementation should embed this interface. This allows
+// gRPC to add new methods to this interface.
+type ClientConn interface {
+	// NewSubConn is called by the balancer to create a new SubConn.
+	// It doesn't block and wait for the connections to be established.
+	// Behaviors of the SubConn can be controlled by options.
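+	//
+	// A minimal call sketch (editor's addition, not upstream documentation;
+	// addr is a resolver.Address the balancer already holds):
+	//
+	//	sc, err := cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+	//		HealthCheckEnabled: true,
+	//		StateListener:      func(scs balancer.SubConnState) { /* react to state */ },
+	//	})
+	//	if err == nil {
+	//		sc.Connect()
+	//	}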
+ // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. + RemoveSubConn(SubConn) + // UpdateAddresses updates the addresses used in the passed in SubConn. + // gRPC checks if the currently connected address is still in the new list. + // If so, the connection will be kept. Else, the connection will be + // gracefully closed, and a new connection will be created. + // + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. + UpdateAddresses(SubConn, []resolver.Address) + + // UpdateState notifies gRPC that the balancer's internal state has + // changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call + // Pick on the new Picker to pick new SubConns. + UpdateState(State) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOptions) + + // Target returns the dial target for this ClientConn. + // + // Deprecated: Use the Target field in the BuildOptions instead. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credentials to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle to use when communicating with a + // remote load balancer server. Balancer implementations which do not + // communicate with a remote load balancer server can ignore this field. + CredsBundle credentials.Bundle + // Dialer is the custom dialer to use when communicating with a remote load + // balancer server. Balancer implementations which do not communicate with a + // remote load balancer server can ignore this field. + Dialer func(context.Context, string) (net.Conn, error) + // Authority is the server name to use as part of the authentication + // handshake when communicating with a remote load balancer server. Balancer + // implementations which do not communicate with a remote load balancer + // server can ignore this field. + Authority string + // ChannelzParentID is the parent ClientConn's channelz ID. + ChannelzParentID *channelz.Identifier + // CustomUserAgent is the custom user agent set on the parent ClientConn. + // The balancer should set the same custom user agent if it creates a + // ClientConn. + CustomUserAgent string + // Target contains the parsed address info of the dial target. It is the + // same resolver.Target as passed to the resolver. See the documentation for + // the resolver.Target type for details about what it contains. + Target resolver.Target +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// ConfigParser parses load balancer configs. 
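+//
+// Editor's sketch of a typical implementation (the builder and lbConfig
+// types below are hypothetical, not part of this package):
+//
+//	type lbConfig struct {
+//		ChildPolicy string `json:"childPolicy"`
+//	}
+//
+//	func (b *builder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+//		var cfg lbConfig
+//		if err := json.Unmarshal(js, &cfg); err != nil {
+//			return nil, fmt.Errorf("invalid loadBalancingConfig: %v", err)
+//		}
+//		return &cfg, nil
+//	}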
+type ConfigParser interface { + // ParseConfig parses the JSON load balancer config provided into an + // internal form or returns an error if the config is invalid. For future + // compatibility reasons, unknown fields in the config should be ignored. + ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) +} + +// PickInfo contains additional information for the Pick operation. +type PickInfo struct { + // FullMethodName is the method name that NewClientStream() is called + // with. The canonical format is /service/Method. + FullMethodName string + // Ctx is the RPC's context, and may contain relevant RPC-level information + // like the outgoing header metadata. + Ctx context.Context +} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // Trailer contains the metadata from the RPC's trailer, if present. + Trailer metadata.MD + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool + // ServerLoad is the load received from server. It's usually sent as part of + // trailing metadata. + // + // The only supported type now is *orca_v3.LoadReport. + ServerLoad any +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + // + // Deprecated: return an appropriate error based on the last resolution or + // connection attempt instead. The behavior is the same for any non-gRPC + // status error. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// PickResult contains information related to a connection chosen for an RPC. +type PickResult struct { + // SubConn is the connection to use for this pick, if its state is Ready. + // If the state is not Ready, gRPC will block the RPC until a new Picker is + // provided by the balancer (using ClientConn.UpdateState). The SubConn + // must be one returned by ClientConn.NewSubConn. + SubConn SubConn + + // Done is called when the RPC is completed. If the SubConn is not ready, + // this will be called with a nil parameter. If the SubConn is not a valid + // type, Done may not be called. May be nil if the balancer does not wish + // to be notified when the RPC completes. + Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metadata metadata.MD +} + +// TransientFailureError returns e. It exists for backward compatibility and +// will be deleted soon. +// +// Deprecated: no longer necessary, picker errors are treated this way by +// default. +func TransientFailureError(e error) error { return e } + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. 
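+//
+// Editor's sketch (not upstream code) of the simplest conforming picker, one
+// that always returns the same ready SubConn:
+//
+//	type onePicker struct{ sc balancer.SubConn }
+//
+//	func (p *onePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+//		return balancer.PickResult{SubConn: p.sc}, nil
+//	}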
+// +// The pickers used by gRPC can be updated by ClientConn.UpdateState(). +type Picker interface { + // Pick returns the connection to use for this RPC and related information. + // + // Pick should not block. If the balancer needs to do I/O or any blocking + // or time-consuming work to service this call, it should return + // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when + // the Picker is updated (using ClientConn.UpdateState). + // + // If an error is returned: + // + // - If the error is ErrNoSubConnAvailable, gRPC will block until a new + // Picker is provided by the balancer (using ClientConn.UpdateState). + // + // - If the error is a status error (implemented by the grpc/status + // package), gRPC will terminate the RPC with the code and message + // provided. + // + // - For all other errors, wait for ready RPCs will wait, but non-wait for + // ready RPCs will be terminated with this error's Error() string and + // status code Unavailable. + Pick(info PickInfo) (PickResult, error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. +// +// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are +// guaranteed to be called synchronously from the same goroutine. There's no +// guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // UpdateClientConnState is called by gRPC when the state of the ClientConn + // changes. If the error returned is ErrBadResolverState, the ClientConn + // will begin calling ResolveNow on the active name resolver with + // exponential backoff until a subsequent call to UpdateClientConnState + // returns a nil error. Any other errors are currently ignored. + UpdateClientConnState(ClientConnState) error + // ResolverError is called by gRPC when the name resolver reports an error. + ResolverError(error) + // UpdateSubConnState is called by gRPC when the state of a SubConn + // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. + UpdateSubConnState(SubConn, SubConnState) + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. + Close() +} + +// ExitIdler is an optional interface for balancers to implement. If +// implemented, ExitIdle will be called when ClientConn.Connect is called, if +// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause +// all SubConns to connect. +// +// Notice: it will be required for all balancers to implement this in a future +// release. +type ExitIdler interface { + // ExitIdle instructs the LB policy to reconnect to backends / exit the + // IDLE state, if appropriate and possible. Note that SubConns that enter + // the IDLE state will not reconnect until SubConn.Connect is called. + ExitIdle() +} + +// SubConnState describes the state of a SubConn. +type SubConnState struct { + // ConnectivityState is the connectivity state of the SubConn. + ConnectivityState connectivity.State + // ConnectionError is set if the ConnectivityState is TransientFailure, + // describing the reason the SubConn failed. Otherwise, it is nil. + ConnectionError error +} + +// ClientConnState describes the state of a ClientConn relevant to the +// balancer. 
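+//
+// Editor's sketch: a balancer usually type-asserts BalancerConfig back to the
+// concrete type its own ParseConfig returned (myBalancer, lbConfig, and
+// applyConfig are hypothetical):
+//
+//	func (b *myBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
+//		cfg, ok := s.BalancerConfig.(*lbConfig)
+//		if !ok {
+//			return balancer.ErrBadResolverState
+//		}
+//		b.applyConfig(cfg) // then create/update SubConns from s.ResolverState.Addresses
+//		return nil
+//	}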
+type ClientConnState struct { + ResolverState resolver.State + // The parsed load balancing configuration returned by the builder's + // ParseConfig method, if implemented. + BalancerConfig serviceconfig.LoadBalancingConfig +} + +// ErrBadResolverState may be returned by UpdateClientConnState to indicate a +// problem with the provided name resolver data. +var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 0000000000..a7f1eeec8e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,264 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +var logger = grpclog.Component("balancer") + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder + config Config +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + bal := &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: resolver.NewAddressMap(), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &balancer.ConnectivityStateEvaluator{}, + config: bb.config, + state: connectivity.Connecting, + } + // Initialize picker to a picker that always returns + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateState with this picker. 
+	bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
+	return bal
+}
+
+func (bb *baseBuilder) Name() string {
+	return bb.name
+}
+
+type baseBalancer struct {
+	cc            balancer.ClientConn
+	pickerBuilder PickerBuilder
+
+	csEvltr *balancer.ConnectivityStateEvaluator
+	state   connectivity.State
+
+	subConns *resolver.AddressMap
+	scStates map[balancer.SubConn]connectivity.State
+	picker   balancer.Picker
+	config   Config
+
+	resolverErr error // the last error reported by the resolver; cleared on successful resolution
+	connErr     error // the last connection error; cleared upon leaving TransientFailure
+}
+
+func (b *baseBalancer) ResolverError(err error) {
+	b.resolverErr = err
+	if b.subConns.Len() == 0 {
+		b.state = connectivity.TransientFailure
+	}
+
+	if b.state != connectivity.TransientFailure {
+		// The picker will not change since the balancer does not currently
+		// report an error.
+		return
+	}
+	b.regeneratePicker()
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: b.state,
+		Picker:            b.picker,
+	})
+}
+
+func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
+	// TODO: handle s.ResolverState.ServiceConfig?
+	if logger.V(2) {
+		logger.Info("base.baseBalancer: got new ClientConn state: ", s)
+	}
+	// Successful resolution; clear resolver error and ensure we return nil.
+	b.resolverErr = nil
+	// addrsSet is the set converted from addrs; it's used for quick lookup of an address.
+	addrsSet := resolver.NewAddressMap()
+	for _, a := range s.ResolverState.Addresses {
+		addrsSet.Set(a, nil)
+		if _, ok := b.subConns.Get(a); !ok {
+			// a is a new address (not existing in b.subConns).
+			var sc balancer.SubConn
+			opts := balancer.NewSubConnOptions{
+				HealthCheckEnabled: b.config.HealthCheck,
+				StateListener:      func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) },
+			}
+			sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts)
+			if err != nil {
+				logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+				continue
+			}
+			b.subConns.Set(a, sc)
+			b.scStates[sc] = connectivity.Idle
+			b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle)
+			sc.Connect()
+		}
+	}
+	for _, a := range b.subConns.Keys() {
+		sci, _ := b.subConns.Get(a)
+		sc := sci.(balancer.SubConn)
+		// a was removed by the resolver.
+		if _, ok := addrsSet.Get(a); !ok {
+			sc.Shutdown()
+			b.subConns.Delete(a)
+			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
+			// The entry will be deleted in updateSubConnState.
+		}
+	}
+	// If resolver state contains no addresses, return an error so ClientConn
+	// will trigger re-resolve. Also records this as a resolver error, so when
+	// the overall state turns transient failure, the error message will have
+	// the zero address information.
+	if len(s.ResolverState.Addresses) == 0 {
+		b.ResolverError(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+
+	b.regeneratePicker()
+	b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
+	return nil
+}
+
+// mergeErrors builds an error from the last connection error and the last
+// resolver error. Must only be called if b.state is TransientFailure.
+func (b *baseBalancer) mergeErrors() error {
+	// connErr must always be non-nil unless there are no SubConns, in which
+	// case resolverErr must be non-nil.
+ if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(b.mergeErrors()) + return + } + readySCs := make(map[balancer.SubConn]SubConnInfo) + + // Filter out all ready SCs from full subConn map. + for _, addr := range b.subConns.Keys() { + sci, _ := b.subConns.Get(addr) + sc := sci.(balancer.SubConn) + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[sc] = SubConnInfo{Address: addr} + } + } + b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) +} + +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. +func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + s := state.ConnectivityState + if logger.V(2) { + logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + } + oldS, ok := b.scStates[sc] + if !ok { + if logger.V(2) { + logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + } + return + } + if oldS == connectivity.TransientFailure && + (s == connectivity.Connecting || s == connectivity.Idle) { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + if s == connectivity.Idle { + sc.Connect() + } + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. + b.connErr = state.ConnectionError + } + + b.state = b.csEvltr.RecordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + b.state == connectivity.TransientFailure { + b.regeneratePicker() + } + b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker}) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call Shutdown for the SubConns. +func (b *baseBalancer) Close() { +} + +// ExitIdle is a nop because the base balancer attempts to stay connected to +// all SubConns at all times. +func (b *baseBalancer) ExitIdle() { +} + +// NewErrPicker returns a Picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +// NewErrPickerV2 is temporarily defined for backward compatibility reasons. 
+// +// Deprecated: use NewErrPicker instead. +var NewErrPickerV2 = NewErrPicker + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + return balancer.PickResult{}, p.err +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 0000000000..e31d76e338 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,71 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build returns a picker that will be used by gRPC to pick a SubConn. + Build(info PickerBuildInfo) balancer.Picker +} + +// PickerBuildInfo contains information needed by the picker builder to +// construct a picker. +type PickerBuildInfo struct { + // ReadySCs is a map from all ready SubConns to the Addresses used to + // create them. + ReadySCs map[balancer.SubConn]SubConnInfo +} + +// SubConnInfo contains information about a SubConn created by the base +// balancer. +type SubConnInfo struct { + Address resolver.Address // the address used to create this SubConn +} + +// Config contains the config info about the base balancer builder. +type Config struct { + // HealthCheck indicates whether health checking should be enabled for this specific balancer. + HealthCheck bool +} + +// NewBalancerBuilder returns a base balancer builder configured by the provided config. +func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + config: config, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go new file mode 100644 index 0000000000..c334135810 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package balancer + +import "google.golang.org/grpc/connectivity" + +// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns +// and returns one aggregated connectivity state. +// +// It's not thread safe. +type ConnectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transient failure state. + numIdle uint64 // Number of addrConns in idle state. +} + +// RecordTransition records state change happening in subConn and based on that +// it evaluates what aggregated state should be. +// +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// +// Shutdown is not considered. +func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + case connectivity.Idle: + cse.numIdle += updateVal + } + } + return cse.CurrentState() +} + +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + if cse.numIdle > 0 { + return connectivity.Idle + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go new file mode 100644 index 0000000000..4ecfa1c215 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package state declares grpclb types to be set by resolvers wishing to pass +// information to grpclb via resolver.State Attributes. 
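+//
+// Editor's sketch: a resolver that has discovered dedicated balancer
+// addresses can attach them before pushing state to gRPC (backendAddrs,
+// lbAddrs, and cc are hypothetical values held by the resolver):
+//
+//	st := state.Set(resolver.State{Addresses: backendAddrs},
+//		&state.State{BalancerAddresses: lbAddrs})
+//	cc.UpdateState(st)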
+package state + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.grpclb.state") + +// State contains gRPCLB-relevant data passed from the name resolver. +type State struct { + // BalancerAddresses contains the remote load balancer address(es). If + // set, overrides any resolver-provided addresses with Type of GRPCLB. + BalancerAddresses []resolver.Address +} + +// Set returns a copy of the provided state with attributes containing s. s's +// data should not be mutated after calling Set. +func Set(state resolver.State, s *State) resolver.State { + state.Attributes = state.Attributes.WithValue(key, s) + return state +} + +// Get returns the grpclb State in the resolver.State, or nil if not present. +// The returned data should not be mutated. +func Get(state resolver.State) *State { + s, _ := state.Attributes.Value(key).(*State) + return s +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 0000000000..f7031ad225 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,81 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpcrand" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +var logger = grpclog.Component("roundrobin") + +// newBuilder creates a new roundrobin balancer builder. +func newBuilder() balancer.Builder { + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { + logger.Infof("roundrobinPicker: Build called with info: %v", info) + if len(info.ReadySCs) == 0 { + return base.NewErrPicker(balancer.ErrNoSubConnAvailable) + } + scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) + for sc := range info.ReadySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + // Start at a random index, as the same RR balancer rebuilds a new + // picker when SubConn states change, and we don't want to apply excess + // load to the first server in the list. + next: uint32(grpcrand.Intn(len(scs))), + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. 
+	subConns []balancer.SubConn
+	next     uint32
+}
+
+func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	subConnsLen := uint32(len(p.subConns))
+	nextIndex := atomic.AddUint32(&p.next, 1)
+
+	sc := p.subConns[nextIndex%subConnsLen]
+	return balancer.PickResult{SubConn: sc}, nil
+}
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
new file mode 100644
index 0000000000..a4411c22bf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -0,0 +1,454 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal/balancer/gracefulswitch"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/resolver"
+)
+
+type ccbMode int
+
+const (
+	ccbModeActive = iota
+	ccbModeIdle
+	ccbModeClosed
+	ccbModeExitingIdle
+)
+
+// ccBalancerWrapper sits between the ClientConn and the Balancer.
+//
+// ccBalancerWrapper implements methods corresponding to the ones on the
+// balancer.Balancer interface. The ClientConn is free to call these methods
+// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
+// to the Balancer happen synchronously and in order.
+//
+// ccBalancerWrapper also implements the balancer.ClientConn interface and is
+// passed to the Balancer implementations. It invokes unexported methods on the
+// ClientConn to handle these calls from the Balancer.
+//
+// It uses the gracefulswitch.Balancer internally to ensure that balancer
+// switches happen in a graceful manner.
+type ccBalancerWrapper struct {
+	// The following fields are initialized when the wrapper is created and are
+	// read-only afterwards, and therefore can be accessed without a mutex.
+	cc   *ClientConn
+	opts balancer.BuildOptions
+
+	// Outgoing (gRPC --> balancer) calls are guaranteed to execute in a
+	// mutually exclusive manner as they are scheduled in the serializer. Fields
+	// accessed *only* in these serializer callbacks can therefore be accessed
+	// without a mutex.
+	balancer        *gracefulswitch.Balancer
+	curBalancerName string
+
+	// mu guards access to the below fields. Access to the serializer and its
+	// cancel function needs to be mutex protected because they are overwritten
+	// when the wrapper exits idle mode.
+	mu               sync.Mutex
+	serializer       *grpcsync.CallbackSerializer // To serialize all outgoing calls.
+	serializerCancel context.CancelFunc           // To close the serializer at close/enterIdle time.
+	mode             ccbMode                      // Tracks the current mode of the wrapper.
+}
+
+// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer
+// is not created until the switchTo() method is invoked.
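+//
+// Editor's note on the serializer pattern used throughout this file (a
+// sketch, grounded only in the calls visible below): callbacks are enqueued
+// with Schedule and run one at a time, in order; Schedule reports false once
+// the serializer has been closed via its context, and Done returns a channel
+// that is closed after all enqueued callbacks have finished:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	s := grpcsync.NewCallbackSerializer(ctx)
+//	ok := s.Schedule(func(ctx context.Context) { /* runs serially */ })
+//	cancel()   // stop accepting new callbacks
+//	<-s.Done() // wait for the queue to drain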
+func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(context.Background()) + ccb := &ccBalancerWrapper{ + cc: cc, + opts: bopts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + return ccb +} + +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.mu.Lock() + errCh := make(chan error, 1) + // Here and everywhere else where Schedule() is called, it is done with the + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. + ok := ccb.serializer.Schedule(func(_ context.Context) { + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { + // If we are unable to schedule a function with the serializer, it + // indicates that it has been closed. A serializer is only closed when + // the wrapper is closed or is in idle. + ccb.mu.Unlock() + return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") + } + ccb.mu.Unlock() + + // We get here only if the above call to Schedule succeeds, in which case it + // is guaranteed that the scheduled function will run. Therefore it is safe + // to block on this channel. + err := <-errCh + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + return err +} + +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) + ccb.mu.Unlock() +} + +func (ccb *ccBalancerWrapper) resolverError(err error) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ResolverError(err) + }) + ccb.mu.Unlock() +} + +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + ccb.buildLoadBalancingPolicy(name) + }) + ccb.mu.Unlock() +} + +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. 
+// - instruct the gracefulswitch balancer to switch to the above builder. This
+//   will actually build the new balancer.
+// - update the `curBalancerName` field
+//
+// Must be called from a serializer callback.
+func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) {
+	builder := balancer.Get(name)
+	if builder == nil {
+		channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name)
+		builder = newPickfirstBuilder()
+	} else {
+		channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name)
+	}
+
+	if err := ccb.balancer.SwitchTo(builder); err != nil {
+		channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err)
+		return
+	}
+	ccb.curBalancerName = builder.Name()
+}
+
+func (ccb *ccBalancerWrapper) close() {
+	channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing")
+	ccb.closeBalancer(ccbModeClosed)
+}
+
+// enterIdleMode is invoked by grpc when the channel enters idle mode upon
+// expiry of idle_timeout. This call blocks until the balancer is closed.
+func (ccb *ccBalancerWrapper) enterIdleMode() {
+	channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode")
+	ccb.closeBalancer(ccbModeIdle)
+}
+
+// closeBalancer is invoked when the channel is being closed or when it enters
+// idle mode upon expiry of idle_timeout.
+func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) {
+	ccb.mu.Lock()
+	if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle {
+		ccb.mu.Unlock()
+		return
+	}
+
+	ccb.mode = m
+	done := ccb.serializer.Done()
+	b := ccb.balancer
+	ok := ccb.serializer.Schedule(func(_ context.Context) {
+		// Close the serializer to ensure that no more calls from gRPC are sent
+		// to the balancer.
+		ccb.serializerCancel()
+		// Empty the current balancer name because we don't have a balancer
+		// anymore and also so that we act on the next call to switchTo by
+		// creating a new balancer specified by the new resolver.
+		ccb.curBalancerName = ""
+	})
+	if !ok {
+		ccb.mu.Unlock()
+		return
+	}
+	ccb.mu.Unlock()
+
+	// Give enqueued callbacks a chance to finish before closing the balancer.
+	<-done
+	b.Close()
+}
+
+// exitIdleMode is invoked by grpc when the channel exits idle mode either
+// because of an RPC or because of an invocation of the Connect() API. This
+// recreates the balancer that was closed previously when entering idle mode.
+//
+// If the channel is not in idle mode, we know for a fact that we are here as a
+// result of the user calling the Connect() method on the ClientConn. In this
+// case, we can simply forward the call to the underlying balancer, instructing
+// it to reconnect to the backends.
+func (ccb *ccBalancerWrapper) exitIdleMode() {
+	ccb.mu.Lock()
+	if ccb.mode == ccbModeClosed {
+		// Request to exit idle is a no-op when wrapper is already closed.
+		ccb.mu.Unlock()
+		return
+	}
+
+	if ccb.mode == ccbModeIdle {
+		// Recreate the serializer which was closed when we entered idle.
+		ctx, cancel := context.WithCancel(context.Background())
+		ccb.serializer = grpcsync.NewCallbackSerializer(ctx)
+		ccb.serializerCancel = cancel
+	}
+
+	// The ClientConn guarantees mutual exclusion between close() and
+	// exitIdleMode(), and since we just created a new serializer, we can be
+	// sure that the below function will be scheduled.
+	done := make(chan struct{})
+	ccb.serializer.Schedule(func(_ context.Context) {
+		defer close(done)
+
+		ccb.mu.Lock()
+		defer ccb.mu.Unlock()
+
+		if ccb.mode != ccbModeIdle {
+			ccb.balancer.ExitIdle()
+			return
+		}
+
+		// Gracefulswitch balancer does not support a switchTo operation after
+		// being closed. Hence we need to create a new one here.
+		ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
+		ccb.mode = ccbModeActive
+		channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode")
+	})
+	ccb.mu.Unlock()
+
+	<-done
+}
+
+func (ccb *ccBalancerWrapper) isIdleOrClosed() bool {
+	ccb.mu.Lock()
+	defer ccb.mu.Unlock()
+	return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	if ccb.isIdleOrClosed() {
+		return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle")
+	}
+
+	if len(addrs) == 0 {
+		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+	}
+	ac, err := ccb.cc.newAddrConn(addrs, opts)
+	if err != nil {
+		channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
+		return nil, err
+	}
+	acbw := &acBalancerWrapper{
+		ccb:           ccb,
+		ac:            ac,
+		producers:     make(map[balancer.ProducerBuilder]*refCountedProducer),
+		stateListener: opts.StateListener,
+	}
+	ac.acbw = acbw
+	return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// The graceful switch balancer will never call this.
+	logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc)
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	if ccb.isIdleOrClosed() {
+		return
+	}
+
+	acbw, ok := sc.(*acBalancerWrapper)
+	if !ok {
+		return
+	}
+	acbw.UpdateAddresses(addrs)
+}
+
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+	if ccb.isIdleOrClosed() {
+		return
+	}
+
+	// Update picker before updating state. Even though the ordering here does
+	// not matter, it can lead to multiple calls of Pick in the common start-up
+	// case where we wait for ready and then perform an RPC. If the picker is
+	// updated later, we could call the "connecting" picker when the state is
+	// updated, and then call the "ready" picker after the picker gets updated.
+	ccb.cc.blockingpicker.updatePicker(s.Picker)
+	ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
+	if ccb.isIdleOrClosed() {
+		return
+	}
+
+	ccb.cc.resolveNow(o)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+	return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements the balancer.SubConn interface.
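+//
+// Editor's sketch of the producer flow from a balancer's perspective
+// (statsBuilder is a hypothetical balancer.ProducerBuilder implementation):
+//
+//	p, closeFn := sc.GetOrBuildProducer(statsBuilder{})
+//	defer closeFn() // releases this reference; the producer is closed at zero refs
+//	_ = p           // use the shared producer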
+type acBalancerWrapper struct {
+	ac            *addrConn          // read-only
+	ccb           *ccBalancerWrapper // read-only
+	stateListener func(balancer.SubConnState)
+
+	mu        sync.Mutex
+	producers map[balancer.ProducerBuilder]*refCountedProducer
+}
+
+func (acbw *acBalancerWrapper) String() string {
+	return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int())
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+	acbw.ac.updateAddrs(addrs)
+}
+
+func (acbw *acBalancerWrapper) Connect() {
+	go acbw.ac.connect()
+}
+
+func (acbw *acBalancerWrapper) Shutdown() {
+	ccb := acbw.ccb
+	if ccb.isIdleOrClosed() {
+		// It is safe to ignore this call when the balancer is closed or in idle
+		// because the ClientConn takes care of closing the connections.
+		//
+		// Not returning early from here when the balancer is closed or in idle
+		// leads to a deadlock though, because of the following sequence of
+		// calls when holding cc.mu:
+		// cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close -->
+		// ccb.RemoveAddrConn --> cc.removeAddrConn
+		return
+	}
+
+	ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+}
+
+// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
+// ready, blocks until it is or ctx expires. Returns an error when the context
+// expires or the addrConn is shut down.
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+	transport, err := acbw.ac.getTransport(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
+}
+
+// Invoke performs a unary RPC. If the addrConn is not ready, returns
+// errSubConnNotReady.
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
+	cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
+	if err != nil {
+		return err
+	}
+	if err := cs.SendMsg(args); err != nil {
+		return err
+	}
+	return cs.RecvMsg(reply)
+}
+
+type refCountedProducer struct {
+	producer balancer.Producer
+	refs     int    // number of current refs to the producer
+	close    func() // underlying producer's close function
+}
+
+func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
+	acbw.mu.Lock()
+	defer acbw.mu.Unlock()
+
+	// Look up existing producer from this builder.
+	pData := acbw.producers[pb]
+	if pData == nil {
+		// Not found; create a new one and add it to the producers map.
+		p, close := pb.Build(acbw)
+		pData = &refCountedProducer{producer: p, close: close}
+		acbw.producers[pb] = pData
+	}
+	// Account for this new reference.
+	pData.refs++
+
+	// Return a cleanup function wrapped in an OnceFunc to remove this reference
+	// and delete the refCountedProducer from the map if the total reference
+	// count goes to zero.
+	unref := func() {
+		acbw.mu.Lock()
+		pData.refs--
+		if pData.refs == 0 {
+			defer pData.close() // Run outside the acbw mutex
+			delete(acbw.producers, pb)
+		}
+		acbw.mu.Unlock()
+	}
+	return pData.producer, grpcsync.OnceFunc(unref)
+}
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
new file mode 100644
index 0000000000..5954801122
--- /dev/null
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -0,0 +1,1183 @@
+// Copyright 2018 The gRPC Authors
+// All rights reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.22.0 +// source: grpc/binlog/v1/binarylog.proto + +package grpc_binarylog_v1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Enumerates the type of event +// Note the terminology is different from the RPC semantics +// definition, but the same meaning is expressed here. +type GrpcLogEntry_EventType int32 + +const ( + GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0 + // Header sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1 + // Header sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2 + // Message sent from client to server + GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3 + // Message sent from server to client + GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4 + // A signal that client is done sending + GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5 + // Trailer indicates the end of the RPC. + // On client side, this event means a trailer was either received + // from the network or the gRPC library locally generated a status + // to inform the application about a failure. + // On server side, this event means the server application requested + // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after + // this due to races on server side. + GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6 + // A signal that the RPC is cancelled. On client side, this + // indicates the client application requests a cancellation. + // On server side, this indicates that cancellation was detected. + // Note: This marks the end of the RPC. Events may arrive after + // this due to races. For example, on client side a trailer + // may arrive even though the application requested to cancel the RPC. + GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7 +) + +// Enum value maps for GrpcLogEntry_EventType. 
+var ( + GrpcLogEntry_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNKNOWN", + 1: "EVENT_TYPE_CLIENT_HEADER", + 2: "EVENT_TYPE_SERVER_HEADER", + 3: "EVENT_TYPE_CLIENT_MESSAGE", + 4: "EVENT_TYPE_SERVER_MESSAGE", + 5: "EVENT_TYPE_CLIENT_HALF_CLOSE", + 6: "EVENT_TYPE_SERVER_TRAILER", + 7: "EVENT_TYPE_CANCEL", + } + GrpcLogEntry_EventType_value = map[string]int32{ + "EVENT_TYPE_UNKNOWN": 0, + "EVENT_TYPE_CLIENT_HEADER": 1, + "EVENT_TYPE_SERVER_HEADER": 2, + "EVENT_TYPE_CLIENT_MESSAGE": 3, + "EVENT_TYPE_SERVER_MESSAGE": 4, + "EVENT_TYPE_CLIENT_HALF_CLOSE": 5, + "EVENT_TYPE_SERVER_TRAILER": 6, + "EVENT_TYPE_CANCEL": 7, + } +) + +func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType { + p := new(GrpcLogEntry_EventType) + *p = x + return p +} + +func (x GrpcLogEntry_EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor() +} + +func (GrpcLogEntry_EventType) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0] +} + +func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead. +func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0} +} + +// Enumerates the entity that generates the log entry +type GrpcLogEntry_Logger int32 + +const ( + GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0 + GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1 + GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2 +) + +// Enum value maps for GrpcLogEntry_Logger. +var ( + GrpcLogEntry_Logger_name = map[int32]string{ + 0: "LOGGER_UNKNOWN", + 1: "LOGGER_CLIENT", + 2: "LOGGER_SERVER", + } + GrpcLogEntry_Logger_value = map[string]int32{ + "LOGGER_UNKNOWN": 0, + "LOGGER_CLIENT": 1, + "LOGGER_SERVER": 2, + } +) + +func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger { + p := new(GrpcLogEntry_Logger) + *p = x + return p +} + +func (x GrpcLogEntry_Logger) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor() +} + +func (GrpcLogEntry_Logger) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1] +} + +func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead. +func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1} +} + +type Address_Type int32 + +const ( + Address_TYPE_UNKNOWN Address_Type = 0 + // address is in 1.2.3.4 form + Address_TYPE_IPV4 Address_Type = 1 + // address is in IPv6 canonical form (RFC5952 section 4) + // The scope is NOT included in the address string. + Address_TYPE_IPV6 Address_Type = 2 + // address is UDS string + Address_TYPE_UNIX Address_Type = 3 +) + +// Enum value maps for Address_Type. 
+var ( + Address_Type_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_IPV4", + 2: "TYPE_IPV6", + 3: "TYPE_UNIX", + } + Address_Type_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_IPV4": 1, + "TYPE_IPV6": 2, + "TYPE_UNIX": 3, + } +) + +func (x Address_Type) Enum() *Address_Type { + p := new(Address_Type) + *p = x + return p +} + +func (x Address_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Address_Type) Descriptor() protoreflect.EnumDescriptor { + return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor() +} + +func (Address_Type) Type() protoreflect.EnumType { + return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2] +} + +func (x Address_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Address_Type.Descriptor instead. +func (Address_Type) EnumDescriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0} +} + +// Log entry we store in binary logs +type GrpcLogEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The timestamp of the binary log message + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Uniquely identifies a call. The value must not be 0 in order to disambiguate + // from an unset value. + // Each call may have several log entries, they will all have the same call_id. + // Nothing is guaranteed about their value other than they are unique across + // different RPCs in the same gRPC process. + CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"` + // The entry sequence id for this call. The first GrpcLogEntry has a + // value of 1, to disambiguate from an unset value. The purpose of + // this field is to detect missing entries in environments where + // durability or ordering is not guaranteed. + SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"` + Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"` + Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum + // The logger uses one of the following fields to record the payload, + // according to the type of the log entry. + // + // Types that are assignable to Payload: + // + // *GrpcLogEntry_ClientHeader + // *GrpcLogEntry_ServerHeader + // *GrpcLogEntry_Message + // *GrpcLogEntry_Trailer + Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"` + // true if payload does not represent the full message or metadata. + PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"` + // Peer address information, will only be recorded on the first + // incoming event. On client side, peer is logged on + // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in + // the case of trailers-only. On server side, peer is always + // logged on EVENT_TYPE_CLIENT_HEADER. 
+ Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` +} + +func (x *GrpcLogEntry) Reset() { + *x = GrpcLogEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GrpcLogEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GrpcLogEntry) ProtoMessage() {} + +func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead. +func (*GrpcLogEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0} +} + +func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *GrpcLogEntry) GetCallId() uint64 { + if x != nil { + return x.CallId + } + return 0 +} + +func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 { + if x != nil { + return x.SequenceIdWithinCall + } + return 0 +} + +func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType { + if x != nil { + return x.Type + } + return GrpcLogEntry_EVENT_TYPE_UNKNOWN +} + +func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { + if x != nil { + return x.Logger + } + return GrpcLogEntry_LOGGER_UNKNOWN +} + +func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { + if m != nil { + return m.Payload + } + return nil +} + +func (x *GrpcLogEntry) GetClientHeader() *ClientHeader { + if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok { + return x.ClientHeader + } + return nil +} + +func (x *GrpcLogEntry) GetServerHeader() *ServerHeader { + if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok { + return x.ServerHeader + } + return nil +} + +func (x *GrpcLogEntry) GetMessage() *Message { + if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok { + return x.Message + } + return nil +} + +func (x *GrpcLogEntry) GetTrailer() *Trailer { + if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok { + return x.Trailer + } + return nil +} + +func (x *GrpcLogEntry) GetPayloadTruncated() bool { + if x != nil { + return x.PayloadTruncated + } + return false +} + +func (x *GrpcLogEntry) GetPeer() *Address { + if x != nil { + return x.Peer + } + return nil +} + +type isGrpcLogEntry_Payload interface { + isGrpcLogEntry_Payload() +} + +type GrpcLogEntry_ClientHeader struct { + ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"` +} + +type GrpcLogEntry_ServerHeader struct { + ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"` +} + +type GrpcLogEntry_Message struct { + // Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE + Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"` +} + +type GrpcLogEntry_Trailer struct { + Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"` +} + +func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} + +func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} + +type ClientHeader struct { + state protoimpl.MessageState + sizeCache 
protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This contains only the metadata from the application.
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // The name of the RPC method, which looks something like:
+ // /<service>/<method>
+ // Note the leading "/" character.
+ MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
+ // A single process may be used to run multiple virtual
+ // servers with different identities.
+ // The authority is the name of such a server identity.
+ // It is typically a portion of the URI in the form of
+ // <host> or <host>:<port> .
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
+ // the RPC timeout
+ Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+}
+
+func (x *ClientHeader) Reset() {
+ *x = ClientHeader{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClientHeader) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientHeader) ProtoMessage() {}
+
+func (x *ClientHeader) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead.
+func (*ClientHeader) Descriptor() ([]byte, []int) {
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ClientHeader) GetMetadata() *Metadata {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
+
+func (x *ClientHeader) GetMethodName() string {
+ if x != nil {
+ return x.MethodName
+ }
+ return ""
+}
+
+func (x *ClientHeader) GetAuthority() string {
+ if x != nil {
+ return x.Authority
+ }
+ return ""
+}
+
+func (x *ClientHeader) GetTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+type ServerHeader struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This contains only the metadata from the application.
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+}
+
+func (x *ServerHeader) Reset() {
+ *x = ServerHeader{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServerHeader) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerHeader) ProtoMessage() {}
+
+func (x *ServerHeader) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead.
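To make the payload oneof above concrete, here is a minimal sketch of constructing a client-header entry by hand, e.g. as a test fixture for a log consumer. The types and wrapper structs are the ones generated in this file; the method, authority, and timeout values are made up, and the `binlogtools` package name is an assumption.

```go
package binlogtools

import (
	"time"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// newClientHeaderEntry builds the first entry of a hypothetical call. The
// payload oneof is populated via the generated GrpcLogEntry_ClientHeader
// wrapper struct.
func newClientHeaderEntry(callID uint64) *binlogpb.GrpcLogEntry {
	return &binlogpb.GrpcLogEntry{
		Timestamp:            timestamppb.Now(),
		CallId:               callID,
		SequenceIdWithinCall: 1, // sequence IDs start at 1, not 0
		Type:                 binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger:               binlogpb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
			ClientHeader: &binlogpb.ClientHeader{
				MethodName: "/example.Service/Method",
				Authority:  "example.com:443",
				Timeout:    durationpb.New(5 * time.Second),
			},
		},
	}
}
```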
+func (*ServerHeader) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2} +} + +func (x *ServerHeader) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type Trailer struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This contains only the metadata from the application. + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The gRPC status code. + StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` + // An original status message before any transport specific + // encoding. + StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"` + // The value of the 'grpc-status-details-bin' metadata key. If + // present, this is always an encoded 'google.rpc.Status' message. + StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` +} + +func (x *Trailer) Reset() { + *x = Trailer{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Trailer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Trailer) ProtoMessage() {} + +func (x *Trailer) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Trailer.ProtoReflect.Descriptor instead. +func (*Trailer) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3} +} + +func (x *Trailer) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Trailer) GetStatusCode() uint32 { + if x != nil { + return x.StatusCode + } + return 0 +} + +func (x *Trailer) GetStatusMessage() string { + if x != nil { + return x.StatusMessage + } + return "" +} + +func (x *Trailer) GetStatusDetails() []byte { + if x != nil { + return x.StatusDetails + } + return nil +} + +// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE +type Message struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Length of the message. It may not be the same as the length of the + // data field, as the logging payload can be truncated or omitted. + Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + // May be truncated or omitted. 
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Message) Reset() { + *x = Message{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Message) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Message) ProtoMessage() {} + +func (x *Message) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Message.ProtoReflect.Descriptor instead. +func (*Message) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4} +} + +func (x *Message) GetLength() uint32 { + if x != nil { + return x.Length + } + return 0 +} + +func (x *Message) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +// A list of metadata pairs, used in the payload of client header, +// server header, and server trailer. +// Implementations may omit some entries to honor the header limits +// of GRPC_BINARY_LOG_CONFIG. +// +// Header keys added by gRPC are omitted. To be more specific, +// implementations will not log the following entries, and this is +// not to be treated as a truncation: +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials +// +// Implementations must always log grpc-trace-bin if it is present. +// Practically speaking it will only be visible on server side because +// grpc-trace-bin is managed by low level client side mechanisms +// inaccessible from the application level. On server side, the +// header is just a normal metadata key. +// The pair will not count towards the size limit. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. 
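The omission rules in the Metadata doc comment above can be read as a predicate over header keys. A sketch of that predicate follows; it is illustrative only, not part of the generated code or of gRPC's exported API.

```go
package binlogtools

import "strings"

// shouldLogHeader reports whether a metadata key may appear in a logged
// Metadata message, following the rules documented above: grpc-trace-bin is
// always logged, other gRPC-internal keys and transport-level headers are not.
func shouldLogHeader(key string) bool {
	key = strings.ToLower(key)
	if key == "grpc-trace-bin" {
		return true // must always be logged when present
	}
	if strings.HasPrefix(key, "grpc-") || key == "lb-token" {
		return false // handled by grpc, not user visible
	}
	switch key {
	case ":path", ":authority", "content-encoding", "user-agent", "te":
		return false // transport specific entries
	}
	return true
}
```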
+func (*Metadata) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5} +} + +func (x *Metadata) GetEntry() []*MetadataEntry { + if x != nil { + return x.Entry + } + return nil +} + +// A metadata key value pair +type MetadataEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *MetadataEntry) Reset() { + *x = MetadataEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataEntry) ProtoMessage() {} + +func (x *MetadataEntry) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead. +func (*MetadataEntry) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6} +} + +func (x *MetadataEntry) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *MetadataEntry) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Address information +type Address struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + // only for TYPE_IPV4 and TYPE_IPV6 + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` +} + +func (x *Address) Reset() { + *x = Address{} + if protoimpl.UnsafeEnabled { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Address) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Address) ProtoMessage() {} + +func (x *Address) ProtoReflect() protoreflect.Message { + mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Address.ProtoReflect.Descriptor instead. 
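For illustration, a hypothetical helper mapping a net.Addr onto the Address message above might look like the following. gRPC's logger performs an equivalent conversion internally, but this exact function is not part of the library.

```go
package binlogtools

import (
	"net"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

// addressFromNetAddr converts TCP and unix addresses into the Address
// message; anything else falls back to TYPE_UNKNOWN.
func addressFromNetAddr(addr net.Addr) *binlogpb.Address {
	switch a := addr.(type) {
	case *net.TCPAddr:
		t := binlogpb.Address_TYPE_IPV4
		if a.IP.To4() == nil {
			t = binlogpb.Address_TYPE_IPV6
		}
		return &binlogpb.Address{Type: t, Address: a.IP.String(), IpPort: uint32(a.Port)}
	case *net.UnixAddr:
		return &binlogpb.Address{Type: binlogpb.Address_TYPE_UNIX, Address: a.Name}
	default:
		return &binlogpb.Address{Type: binlogpb.Address_TYPE_UNKNOWN, Address: addr.String()}
	}
}
```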
+func (*Address) Descriptor() ([]byte, []int) { + return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7} +} + +func (x *Address) GetType() Address_Type { + if x != nil { + return x.Type + } + return Address_TYPE_UNKNOWN +} + +func (x *Address) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *Address) GetIpPort() uint32 { + if x != nil { + return x.IpPort + } + return 0 +} + +var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor + +var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, + 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbb, 0x07, 0x0a, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x17, 0x0a, 0x07, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x63, 0x61, 0x6c, 0x6c, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x65, 0x71, 0x75, + 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x5f, 0x63, + 0x61, 0x6c, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x73, 0x65, 0x71, 0x75, 0x65, + 0x6e, 0x63, 0x65, 0x49, 0x64, 0x57, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x43, 0x61, 0x6c, 0x6c, 0x12, + 0x3d, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x46, + 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x07, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, + 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x69, + 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, + 0x74, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x04, 0x70, + 0x65, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, + 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x04, 0x70, 0x65, 0x65, 0x72, 0x22, 0xf5, 0x01, 0x0a, 0x09, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, + 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, + 0x1c, 0x0a, 0x18, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, + 0x52, 0x56, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x02, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x1d, 0x0a, 0x19, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, + 0x52, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x04, 0x12, 0x20, 0x0a, 0x1c, 0x45, + 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, + 0x5f, 0x48, 0x41, 0x4c, 0x46, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x10, 0x05, 0x12, 0x1d, 0x0a, + 0x19, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, + 0x45, 0x52, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, + 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, + 0x4c, 0x10, 0x07, 0x22, 0x42, 0x0a, 0x06, 0x4c, 0x6f, 0x67, 0x67, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x0e, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x47, 0x47, 0x45, 0x52, 0x5f, 0x53, + 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 
0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0xbb, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, + 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, + 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x07, 0x74, + 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, + 0x22, 0x47, 0x0a, 0x0c, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, + 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0x35, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x36, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x37, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, + 0x64, 
0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x33, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x31, 0x2e, + 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x07, + 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, + 0x70, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x45, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, + 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x34, 0x10, 0x01, 0x12, 0x0d, + 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x49, 0x58, 0x10, 0x03, 0x42, 0x5c, 0x0a, 0x14, + 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, + 0x67, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, + 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, + 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once + file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc +) + +func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { + file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + }) + return file_grpc_binlog_v1_binarylog_proto_rawDescData +} + +var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{ + (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType + (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger + (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type + (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry + (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader + (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader + (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer + (*Message)(nil), // 7: grpc.binarylog.v1.Message + (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata + (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry + (*Address)(nil), // 10: grpc.binarylog.v1.Address + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 
12: google.protobuf.Duration +} +var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{ + 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp + 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType + 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger + 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader + 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader + 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message + 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer + 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address + 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration + 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata + 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata + 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry + 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type + 14, // [14:14] is the sub-list for method output_type + 14, // [14:14] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_grpc_binlog_v1_binarylog_proto_init() } +func file_grpc_binlog_v1_binarylog_proto_init() { + if File_grpc_binlog_v1_binarylog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GrpcLogEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Trailer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Message); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*MetadataEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Address); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*GrpcLogEntry_ClientHeader)(nil), + (*GrpcLogEntry_ServerHeader)(nil), + (*GrpcLogEntry_Message)(nil), + (*GrpcLogEntry_Trailer)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + NumEnums: 3, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes, + DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs, + EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes, + MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, + }.Build() + File_grpc_binlog_v1_binarylog_proto = out.File + file_grpc_binlog_v1_binarylog_proto_rawDesc = nil + file_grpc_binlog_v1_binarylog_proto_goTypes = nil + file_grpc_binlog_v1_binarylog_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 0000000000..788c89c16f --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race conditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. 
+// +// DEPRECATED: Use ClientConn.Invoke instead. +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { + cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(req); err != nil { + return err + } + return cs.RecvMsg(reply) +} diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 0000000000..32b7fa5794 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// # Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. +package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 0000000000..ff7fea1022 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,2032 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "net/url"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/backoff"
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/internal/idle"
+ "google.golang.org/grpc/internal/pretty"
+ iresolver "google.golang.org/grpc/internal/resolver"
+ "google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+ "google.golang.org/grpc/status"
+
+ _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
+ _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver.
+ _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
+ _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver.
+)
+
+const (
+ // minimum time to give a connection to complete
+ minConnectTimeout = 20 * time.Second
+)
+
+var (
+ // ErrClientConnClosing indicates that the operation is illegal because
+ // the ClientConn is closing.
+ //
+ // Deprecated: this error should not be relied upon by users; use the status
+ // code of Canceled instead.
+ ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing")
+ // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
+ errConnDrain = errors.New("grpc: the connection is drained")
+ // errConnClosing indicates that the connection is closing.
+ errConnClosing = errors.New("grpc: the connection is closing")
+ // errConnIdling indicates that the connection is being closed as the channel
+ // is moving to an idle mode due to inactivity.
+ errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
+ // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
+ // service config.
+ invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
+)
+
+// The following errors are returned from Dial and DialContext
+var (
+ // errNoTransportSecurity indicates that there is no transport security
+ // being set for ClientConn. Users should either set one or explicitly
+ // call WithInsecure DialOption to disable security.
+ errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
+ // errTransportCredsAndBundle indicates that creds bundle is used together
+ // with other individual Transport Credentials.
+ errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
+ // errNoTransportCredsInBundle indicates that the configured creds bundle
+ // returned nil transport credentials.
+ errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
+ // errTransportCredentialsMissing indicates that users want to transmit
+ // security information (e.g., OAuth2 token) which requires secure
+ // connection on an insecure connection.
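The error values above correspond to the credential combinations that dialing rejects. For reference, a caller stays clear of all of them by supplying exactly one transport-security choice; a minimal sketch using the public API, where the target address and certificate path are placeholders:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Option 1: explicitly opt out of transport security. This satisfies
	// the errNoTransportSecurity check without enabling TLS.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	conn.Close()

	// Option 2: TLS. Supplying both this and a credentials bundle would
	// trip errTransportCredsAndBundle.
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatal(err)
	}
	tlsConn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatal(err)
	}
	tlsConn.Close()
}
```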
+ errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") +) + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 + // http2IOBufSize specifies the buffer size for sending frames. + defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 +) + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +type defaultConfigSelector struct { + sc *ServiceConfig +} + +func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) { + return &iresolver.RPCConfig{ + Context: rpcInfo.Context, + MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method), + }, nil +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), + } + + // We start the channel off in idle mode, but kick it out of idle at the end + // of this method, instead of waiting for the first RPC. Other gRPC + // implementations do wait for the first RPC to kick the channel out of + // idle. But doing so would be a major behavior change for our users who are + // used to seeing the channel active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, if at all we ever get to do that. + cc.idlenessState = ccIdlenessStateIdle + + cc.retryThrottler.Store((*retryThrottler)(nil)) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + cc.exitIdleCond = sync.NewCond(&cc.mu) + + disableGlobalOpts := false + for _, opt := range opts { + if _, ok := opt.(*disableGlobalDialOptions); ok { + disableGlobalOpts = true + break + } + } + + if !disableGlobalOpts { + for _, opt := range globalDialOptions { + opt.apply(&cc.dopts) + } + } + + for _, opt := range opts { + opt.apply(&cc.dopts) + } + + chainUnaryClientInterceptors(cc) + chainStreamClientInterceptors(cc) + + defer func() { + if err != nil { + cc.Close() + } + }() + + // Register ClientConn with channelz. 
+ cc.channelzRegistration(target) + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + + if err := cc.validateTransportCredentials(); err != nil { + return nil, err + } + + if cc.dopts.defaultServiceConfigRawJSON != nil { + scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON) + if scpr.Err != nil { + return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err) + } + cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) + } + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + defer func() { + select { + case <-ctx.Done(): + switch { + case ctx.Err() == err: + conn = nil + case err == nil || !cc.dopts.returnLastError: + conn, err = nil, ctx.Err() + default: + conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err) + } + default: + } + }() + + if cc.dopts.bs == nil { + cc.dopts.bs = backoff.DefaultExponential + } + + // Determine the resolver to use. + if err := cc.parseTargetAndFindResolver(); err != nil { + return nil, err + } + if err = cc.determineAuthority(); err != nil { + return nil, err + } + + if cc.dopts.scChan != nil { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + // This creates the name resolver, load balancer, blocking picker etc. + if err := cc.exitIdleMode(); err != nil { + return nil, err + } + + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. + cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) + + // Return early for non-blocking dials. + if !cc.dopts.block { + return cc, nil + } + + // A blocking dial blocks until the clientConn is ready. + for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } +} + +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. 
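To illustrate the blocking and non-blocking behavior documented on DialContext above, a short sketch using the public API; the target address is a placeholder:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Non-blocking (default): Dial returns immediately and the connection
	// is established in the background.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Blocking: with WithBlock, the ctx bounds how long we wait for the
	// channel to become Ready.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	bconn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock())
	if err != nil {
		log.Fatalf("dial did not become ready in time: %v", err)
	}
	defer bconn.Close()
}
```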
+func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +type idler ClientConn + +func (i *idler) EnterIdleMode() error { + return (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. +func (cc *ClientConn) exitIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { + cc.mu.Unlock() + channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) + return nil + } + + defer func() { + // When Close() and exitIdleMode() race against each other, one of the + // following two can happen: + // - Close() wins the race and runs first. exitIdleMode() runs after, and + // sees that the ClientConn is already closed and hence returns early. + // - exitIdleMode() wins the race and runs first and recreates the balancer + // and releases the lock before recreating the resolver. If Close() runs + // in this window, it will wait for exitIdleMode to complete. + // + // We achieve this synchronization using the below condition variable. + cc.mu.Lock() + cc.idlenessState = ccIdlenessStateActive + cc.exitIdleCond.Signal() + cc.mu.Unlock() + }() + + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { + cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + if cc.balancerWrapper == nil { + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + } else { + cc.balancerWrapper.exitIdleMode() + } + cc.firstResolveEvent = grpcsync.NewEvent() + cc.mu.Unlock() + + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline which needs to be handled + // by cc.updateResolverState() which also grabs cc.mu. + if err := cc.initResolverWrapper(credsClone); err != nil { + return err + } + + if exitedIdle { + cc.addTraceEvent("exiting idle mode") + } + return nil +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer and any subchannels. +func (cc *ClientConn) enterIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { + channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) + cc.mu.Unlock() + return nil + } + + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. 
This also means that we
+ // don't have to do this when exiting idle mode.
+ conns := cc.conns
+ cc.conns = make(map[*addrConn]struct{})
+
+ // TODO: Currently, we close the resolver wrapper upon entering idle mode
+ // and create a new one upon exiting idle mode. This means that the
+ // `cc.resolverWrapper` field would be overwritten every time we exit idle
+ // mode. While this means that we need to hold `cc.mu` when accessing
+ // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should
+ // try to do the same for the balancer and picker wrappers too.
+ cc.resolverWrapper.close()
+ cc.blockingpicker.enterIdleMode()
+ cc.balancerWrapper.enterIdleMode()
+ cc.csMgr.updateState(connectivity.Idle)
+ cc.idlenessState = ccIdlenessStateIdle
+ cc.mu.Unlock()
+
+ go func() {
+ cc.addTraceEvent("entering idle mode")
+ for ac := range conns {
+ ac.tearDown(errConnIdling)
+ }
+ }()
+ return nil
+}
+
+// validateTransportCredentials performs a series of checks on the configured
+// transport credentials. It returns a non-nil error if any of these conditions
+// are met:
+// - no transport creds and no creds bundle is configured
+// - both transport creds and creds bundle are configured
+// - creds bundle is configured, but it lacks transport credentials
+// - insecure transport creds configured alongside call creds that require
+// transport level security
+//
+// If none of the above conditions are met, the configured credentials are
+// deemed valid and a nil error is returned.
+func (cc *ClientConn) validateTransportCredentials() error {
+ if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
+ return errNoTransportSecurity
+ }
+ if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
+ return errTransportCredsAndBundle
+ }
+ if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
+ return errNoTransportCredsInBundle
+ }
+ transportCreds := cc.dopts.copts.TransportCredentials
+ if transportCreds == nil {
+ transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
+ }
+ if transportCreds.Info().SecurityProtocol == "insecure" {
+ for _, cd := range cc.dopts.copts.PerRPCCredentials {
+ if cd.RequireTransportSecurity() {
+ return errTransportCredentialsMissing
+ }
+ }
+ }
+ return nil
+}
+
+// channelzRegistration registers the newly created ClientConn with channelz and
+// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`.
+// A channelz trace event is emitted for ClientConn creation. If the newly
+// created ClientConn is a nested one, i.e. a valid parent ClientConn ID is
+// specified via a dial option, the trace event is also added to the parent.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) channelzRegistration(target string) {
+ cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
+ cc.addTraceEvent("created")
+}
+
+// chainUnaryClientInterceptors chains all unary client interceptors into one.
+func chainUnaryClientInterceptors(cc *ClientConn) {
+ interceptors := cc.dopts.chainUnaryInts
+ // Prepend dopts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+ // be executed before any other chained interceptors.
+ if cc.dopts.unaryInt != nil {
+ interceptors = append([]UnaryClientInterceptor{cc.dopts.unaryInt}, interceptors...)
+ } + var chainedInt UnaryClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) + } + } + cc.dopts.unaryInt = chainedInt +} + +// getChainUnaryInvoker recursively generate the chained unary invoker. +func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, finalInvoker UnaryInvoker) UnaryInvoker { + if curr == len(interceptors)-1 { + return finalInvoker + } + return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { + return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) + } +} + +// chainStreamClientInterceptors chains all stream client interceptors into one. +func chainStreamClientInterceptors(cc *ClientConn) { + interceptors := cc.dopts.chainStreamInts + // Prepend dopts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + if cc.dopts.streamInt != nil { + interceptors = append([]StreamClientInterceptor{cc.dopts.streamInt}, interceptors...) + } + var chainedInt StreamClientInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) { + return interceptors[0](ctx, desc, cc, method, getChainStreamer(interceptors, 0, streamer), opts...) + } + } + cc.dopts.streamInt = chainedInt +} + +// getChainStreamer recursively generate the chained client stream constructor. +func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStreamer Streamer) Streamer { + if curr == len(interceptors)-1 { + return finalStreamer + } + return func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return interceptors[curr+1](ctx, desc, cc, method, getChainStreamer(interceptors, curr+1, finalStreamer), opts...) + } +} + +// newConnectivityStateManager creates an connectivityStateManager with +// the specified id. +func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { + return &connectivityStateManager{ + channelzID: id, + pubSub: grpcsync.NewPubSub(ctx), + } +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} + channelzID *channelz.Identifier + pubSub *grpcsync.PubSub +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. 
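Taken together, chainUnaryClientInterceptors and getChainUnaryInvoker compose the interceptor installed via WithUnaryInterceptor ahead of the WithChainUnaryInterceptor entries, in registration order. A minimal caller-side sketch of that composition follows; the target address and interceptor names are hypothetical, and Go 1.18+ is assumed for `any`.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// named returns a unary client interceptor that logs around the next
// invoker, making the nesting produced by getChainUnaryInvoker visible.
func named(name string) grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		log.Printf("-> %s %s", name, method)
		err := invoker(ctx, method, req, reply, cc, opts...)
		log.Printf("<- %s (err=%v)", name, err)
		return err
	}
}

func main() {
	// "outer" wraps "inner", which wraps the real invoker.
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithChainUnaryInterceptor(named("outer"), named("inner")),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer cc.Close()
	// Any RPC issued on cc now logs: -> outer, -> inner, <- inner, <- outer.
}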
+func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + csm.pubSub.Publish(state) + + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + +// ClientConn represents a virtual connection to a conceptual endpoint, to +// perform RPCs. +// +// A ClientConn is free to have zero or more actual connections to the endpoint +// based on configuration, load, etc. It is also free to determine which actual +// endpoints to use and may change it every RPC, permitting client-side load +// balancing. +// +// A ClientConn encapsulates a range of functionality including name +// resolution, TCP connection establishment (with retries and backoff) and TLS +// handshakes. It also handles errors on established connections by +// re-resolving the name and reconnecting. +type ClientConn struct { + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + idlenessMgr idle.Manager + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper + safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. + + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. + firstResolveEvent *grpcsync.Event + + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. 
+ sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + idlenessState ccIdlenessState // Tracks idleness state of the channel. + exitIdleCond *sync.Cond // Signalled when channel exits idle. + + lceMu sync.Mutex // protects lastConnectionError + lastConnectionError error +} + +// ccIdlenessState tracks the idleness state of the channel. +// +// Channels start off in `active` and move to `idle` after a period of +// inactivity. When moving back to `active` upon an incoming RPC, they +// transition through `exiting_idle`. This state is useful for synchronization +// with Close(). +// +// This state tracking is mostly for self-protection. The idlenessManager is +// expected to keep track of the state as well, and is expected not to call into +// the ClientConn unnecessarily. +type ccIdlenessState int8 + +const ( + ccIdlenessStateActive ccIdlenessState = iota + ccIdlenessStateIdle + ccIdlenessStateExitingIdle +) + +func (s ccIdlenessState) String() string { + switch s { + case ccIdlenessStateActive: + return "active" + case ccIdlenessStateIdle: + return "idle" + case ccIdlenessStateExitingIdle: + return "exitingIdle" + default: + return "unknown" + } +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +// Connect causes all subchannels in the ClientConn to attempt to connect if +// the channel is idle. Does not wait for the connection attempts to begin +// before returning. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a later +// release. +func (cc *ClientConn) Connect() { + cc.exitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. + cc.balancerWrapper.exitIdleMode() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revisit this decision in the future. + cc.sc = &sc + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +// waitForResolvedAddrs blocks until the resolver has provided addresses or the +// context expires. Returns nil unless the context expires first; otherwise +// returns a status error based on the context. +func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { + // This is on the RPC path, so we use a fast path to avoid the + // more-expensive "select" below after the resolver has returned once. 
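GetState, WaitForStateChange and Connect are usually consumed together in a readiness loop. A sketch of that pattern, assuming both state APIs remain experimental as their comments note; the target is a placeholder.

package main

import (
	"context"
	"errors"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

// waitUntilReady blocks until cc reaches READY, the channel shuts down,
// or ctx expires.
func waitUntilReady(ctx context.Context, cc *grpc.ClientConn) error {
	for {
		s := cc.GetState()
		switch s {
		case connectivity.Ready:
			return nil
		case connectivity.Shutdown:
			return errors.New("channel is shut down")
		case connectivity.Idle:
			cc.Connect() // nudge an idle channel into connecting
		}
		if !cc.WaitForStateChange(ctx, s) {
			return ctx.Err() // context expired before the state changed
		}
	}
}

func main() {
	cc, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	log.Println("ready:", waitUntilReady(ctx, cc))
}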
+ if cc.firstResolveEvent.HasFired() { + return nil + } + select { + case <-cc.firstResolveEvent.Done(): + return nil + case <-ctx.Done(): + return status.FromContextError(ctx.Err()).Err() + case <-cc.ctx.Done(): + return ErrClientConnClosing + } +} + +var emptyServiceConfig *ServiceConfig + +func init() { + cfg := parseServiceConfig("{}") + if cfg.Err != nil { + panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) + } + emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } +} + +func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { + if cc.sc != nil { + cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) + return + } + if cc.dopts.defaultServiceConfig != nil { + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) + } else { + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) + } +} + +func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { + defer cc.firstResolveEvent.Fire() + cc.mu.Lock() + // Check if the ClientConn is already closed. Some fields (e.g. + // balancerWrapper) are set to nil when closing the ClientConn, and could + // cause nil pointer panic if we don't have this check. + if cc.conns == nil { + cc.mu.Unlock() + return nil + } + + if err != nil { + // May need to apply the initial service config in case the resolver + // doesn't support service configs, or doesn't provide a service config + // with the new addresses. + cc.maybeApplyDefaultServiceConfig(nil) + + cc.balancerWrapper.resolverError(err) + + // No addresses are valid with err set; return early. + cc.mu.Unlock() + return balancer.ErrBadResolverState + } + + var ret error + if cc.dopts.disableServiceConfig { + channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig(s.Addresses) + } else if s.ServiceConfig == nil { + cc.maybeApplyDefaultServiceConfig(s.Addresses) + // TODO: do we need to apply a failing LB policy if there is no + // default, per the error handling design? + } else { + if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok { + configSelector := iresolver.GetConfigSelector(s) + if configSelector != nil { + if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { + channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + } + } else { + configSelector = &defaultConfigSelector{sc} + } + cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) + } else { + ret = balancer.ErrBadResolverState + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) + cc.mu.Unlock() + return ret + } + } + } + + var balCfg serviceconfig.LoadBalancingConfig + if cc.sc != nil && cc.sc.lbConfig != nil { + balCfg = cc.sc.lbConfig.cfg + } + bw := cc.balancerWrapper + cc.mu.Unlock() + + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) + if ret == nil { + ret = uccsErr // prefer ErrBadResolver state since any other error is + // currently meaningless to the caller. 
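As maybeApplyDefaultServiceConfig shows, a resolver-supplied service config always wins; the dial-option default (or, failing that, the empty config) is only a fallback. A sketch of supplying such a fallback at dial time, assuming the usual service-config JSON shape; the target and service name are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Applied only when the resolver does not provide a service config.
	const fallback = `{
	  "loadBalancingConfig": [{"round_robin": {}}],
	  "methodConfig": [{
	    "name": [{"service": "echo.Echo"}],
	    "waitForReady": true
	  }]
	}`
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(fallback),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer cc.Close()
}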
+ } + return ret +} + +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. +// +// Caller must hold cc.mu. +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) + } else { + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) + } + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { + cc.balancerWrapper.updateSubConnState(sc, s, err) +} + +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + ac := &addrConn{ + state: connectivity.Idle, + cc: cc, + addrs: copyAddressesWithoutBalancerAttributes(addrs), + scopts: opts, + dopts: cc.dopts, + czData: new(channelzData), + resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { + return nil, ErrClientConnClosing + } + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err + } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + + cc.conns[ac] = struct{}{} + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. 
+func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { + return &channelz.ChannelInternalMetric{ + State: cc.GetState(), + Target: cc.target, + CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), + } +} + +// Target returns the target string of the ClientConn. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) Target() string { + return cc.target +} + +func (cc *ClientConn) incrCallsStarted() { + atomic.AddInt64(&cc.czData.callsStarted, 1) + atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (cc *ClientConn) incrCallsSucceeded() { + atomic.AddInt64(&cc.czData.callsSucceeded, 1) +} + +func (cc *ClientConn) incrCallsFailed() { + atomic.AddInt64(&cc.czData.callsFailed, 1) +} + +// connect starts creating a transport. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } + ac.mu.Unlock() + return nil + } + ac.mu.Unlock() + + ac.resetTransport() + return nil +} + +func equalAddresses(a, b []resolver.Address) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if !v.Equal(b[i]) { + return false + } + } + return true +} + +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. +func (ac *addrConn) updateAddrs(addrs []resolver.Address) { + ac.mu.Lock() + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + + addrs = copyAddressesWithoutBalancerAttributes(addrs) + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + + if ac.state == connectivity.Shutdown || + ac.state == connectivity.TransientFailure || + ac.state == connectivity.Idle { + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return + } + + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } + } + + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. + + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. 
+ if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil + } + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) + } + + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() +} + +// getServerName determines the serverName to be used in the connection +// handshake. The default value for the serverName is the authority on the +// ClientConn, which either comes from the user's dial target or through an +// authority override specified using the WithAuthority dial option. Name +// resolvers can specify a per-address override for the serverName through the +// resolver.Address.ServerName field which is used only if the WithAuthority +// dial option was not used. The rationale is that per-address authority +// overrides specified by the name resolver can represent a security risk, while +// an override specified by the user is more dependable since they probably know +// what they are doing. +func (cc *ClientConn) getServerName(addr resolver.Address) string { + if cc.dopts.authority != "" { + return cc.dopts.authority + } + if addr.ServerName != "" { + return addr.ServerName + } + return cc.authority +} + +func getMethodConfig(sc *ServiceConfig, method string) MethodConfig { + if sc == nil { + return MethodConfig{} + } + if m, ok := sc.Methods[method]; ok { + return m + } + i := strings.LastIndex(method, "/") + if m, ok := sc.Methods[method[:i+1]]; ok { + return m + } + return sc.Methods[""] +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the service's default +// config under the service (i.e /service/) and then for the default for all services (empty string). +// +// If there is a default MethodConfig for the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. + cc.mu.RLock() + defer cc.mu.RUnlock() + return getMethodConfig(cc.sc, method) +} + +func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { + cc.mu.RLock() + defer cc.mu.RUnlock() + if cc.sc == nil { + return nil + } + return cc.sc.healthCheckConfig +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { + return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + Ctx: ctx, + FullMethodName: method, + }) +} + +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { + if sc == nil { + // should never reach here. + return + } + cc.sc = sc + if configSelector != nil { + cc.safeConfigSelector.UpdateConfigSelector(configSelector) + } + + if cc.sc.retryThrottling != nil { + newThrottler := &retryThrottler{ + tokens: cc.sc.retryThrottling.MaxTokens, + max: cc.sc.retryThrottling.MaxTokens, + thresh: cc.sc.retryThrottling.MaxTokens / 2, + ratio: cc.sc.retryThrottling.TokenRatio, + } + cc.retryThrottler.Store(newThrottler) + } else { + cc.retryThrottler.Store((*retryThrottler)(nil)) + } + + var newBalancerName string + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. 
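getMethodConfig resolves a method's configuration with three keys in strict order: the fully-qualified method, the per-service default, and the global default. A toy illustration of that precedence with a plain map standing in for ServiceConfig.Methods; the method names and values are made up.

package main

import (
	"fmt"
	"strings"
)

// lookup mirrors getMethodConfig's precedence: exact "/service/method",
// then the service default "/service/", then the global default "".
func lookup(methods map[string]string, method string) string {
	if v, ok := methods[method]; ok {
		return v
	}
	i := strings.LastIndex(method, "/")
	if v, ok := methods[method[:i+1]]; ok {
		return v
	}
	return methods[""]
}

func main() {
	methods := map[string]string{
		"/echo.Echo/UnaryEcho": "exact match",
		"/echo.Echo/":          "service default",
		"":                     "global default",
	}
	fmt.Println(lookup(methods, "/echo.Echo/UnaryEcho"))     // exact match
	fmt.Println(lookup(methods, "/echo.Echo/StreamingEcho")) // service default
	fmt.Println(lookup(methods, "/other.Svc/Method"))        // global default
}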
+ newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB + } + cc.balancerWrapper.switchTo(newBalancerName) +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { + cc.mu.RLock() + r := cc.resolverWrapper + cc.mu.RUnlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// ResetConnectBackoff wakes up all subchannels in transient failure and causes +// them to attempt another connection immediately. It also resets the backoff +// times used for subsequent attempts regardless of the current state. +// +// In general, this function should not be used. Typical service or network +// outages result in a reasonable client reconnection strategy by default. +// However, if a previously unavailable network becomes available, this may be +// used to trigger an immediate reconnect. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func (cc *ClientConn) ResetConnectBackoff() { + cc.mu.Lock() + conns := cc.conns + cc.mu.Unlock() + for ac := range conns { + ac.resetConnectBackoff() + } +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + + for cc.idlenessState == ccIdlenessStateExitingIdle { + cc.exitIdleCond.Wait() + } + + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + pWrapper := cc.blockingpicker + rWrapper := cc.resolverWrapper + bWrapper := cc.balancerWrapper + idlenessMgr := cc.idlenessMgr + cc.mu.Unlock() + + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. + if pWrapper != nil { + pWrapper.close() + } + if bWrapper != nil { + bWrapper.close() + } + if rWrapper != nil { + rWrapper.close() + } + if idlenessMgr != nil { + idlenessMgr.Close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + cc.addTraceEvent("deleted") + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + dopts dialOptions + acbw balancer.SubConn + scopts balancer.NewSubConnOptions + + // transport is set when there's a viable transport (note: ac state may not be READY as LB channel + // health checking may require server to report healthy to set ac to READY), and is reset + // to nil when the current transport should no longer be used to create a stream (e.g. after GoAway + // is received, transport is closed, ac has been torn down). + transport transport.ClientTransport // The current transport. + + mu sync.Mutex + curAddr resolver.Address // The current address. + addrs []resolver.Address // All addresses that the resolver resolved to. + + // Use updateConnectivityState for updating addrConn's connectivity state. + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. + + backoffIdx int // Needs to be stateful for resetConnectBackoff. 
+ resetBackoff chan struct{} + + channelzID *channelz.Identifier + czData *channelzData +} + +// Note: this requires a lock on ac.mu. +func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) { + if ac.state == s { + return + } + // When changing states, reset the state change channel. + close(ac.stateChan) + ac.stateChan = make(chan struct{}) + ac.state = s + if lastErr == nil { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + } else { + channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + } + ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +func (ac *addrConn) resetTransport() { + ac.mu.Lock() + acCtx := ac.ctx + if acCtx.Err() != nil { + ac.mu.Unlock() + return + } + + addrs := ac.addrs + backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx) + // This will be the duration that dial gets to finish. + dialDuration := minConnectTimeout + if ac.dopts.minConnectTimeout != nil { + dialDuration = ac.dopts.minConnectTimeout() + } + + if dialDuration < backoffFor { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + // We can potentially spend all the time trying the first address, and + // if the server accepts the connection and then hangs, the following + // addresses will never be tried. + // + // The spec doesn't mention what should be done for multiple addresses. + // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm + connectDeadline := time.Now().Add(dialDuration) + + ac.updateConnectivityState(connectivity.Connecting, nil) + ac.mu.Unlock() + + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + ac.mu.Lock() + if acCtx.Err() != nil { + // addrConn was torn down. + ac.mu.Unlock() + return + } + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. + ac.updateConnectivityState(connectivity.TransientFailure, err) + + // Backoff. + b := ac.resetBackoff + ac.mu.Unlock() + + timer := time.NewTimer(backoffFor) + select { + case <-timer.C: + ac.mu.Lock() + ac.backoffIdx++ + ac.mu.Unlock() + case <-b: + timer.Stop() + case <-acCtx.Done(): + timer.Stop() + return + } + + ac.mu.Lock() + if acCtx.Err() == nil { + ac.updateConnectivityState(connectivity.Idle, err) + } + ac.mu.Unlock() + return + } + // Success; reset backoff. + ac.mu.Lock() + ac.backoffIdx = 0 + ac.mu.Unlock() +} + +// tryAllAddrs tries to creates a connection to the addresses, and stop when at +// the first successful one. It returns an error if no address was successfully +// connected, or updates ac appropriately with the new transport. 
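The select at the tail of resetTransport is what ResetConnectBackoff interacts with: closing resetBackoff wakes every sleeping subchannel early. A condensed, stdlib-only sketch of just that wait, with the channel and duration supplied by the caller.

package main

import (
	"context"
	"fmt"
	"time"
)

// backoffWait sleeps for backoffFor, returning early if resetBackoff is
// closed (as resetConnectBackoff does) or ctx is canceled (addrConn torn
// down), mirroring the select in resetTransport.
func backoffWait(ctx context.Context, backoffFor time.Duration, resetBackoff <-chan struct{}) string {
	timer := time.NewTimer(backoffFor)
	defer timer.Stop()
	select {
	case <-timer.C:
		return "backoff elapsed" // caller increments backoffIdx and goes Idle
	case <-resetBackoff:
		return "backoff reset" // caller retries immediately
	case <-ctx.Done():
		return "torn down" // caller returns without reconnecting
	}
}

func main() {
	reset := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(reset) // what resetConnectBackoff does for each subchannel
	}()
	fmt.Println(backoffWait(context.Background(), time.Hour, reset))
}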
+func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { + var firstConnErr error + for _, addr := range addrs { + if ctx.Err() != nil { + return errConnClosing + } + ac.mu.Lock() + + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + + copts := ac.dopts.copts + if ac.scopts.CredsBundle != nil { + copts.CredsBundle = ac.scopts.CredsBundle + } + ac.mu.Unlock() + + channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + + err := ac.createTransport(ctx, addr, copts, connectDeadline) + if err == nil { + return nil + } + if firstConnErr == nil { + firstConnErr = err + } + ac.cc.updateConnectionError(err) + } + + // Couldn't connect to any address. + return firstConnErr +} + +// createTransport creates a connection to addr. It returns an error if the +// address was not successfully connected, or updates ac appropriately with the +// new transport. +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { + addr.ServerName = ac.cc.getServerName(addr) + hctx, hcancel := context.WithCancel(ctx) + + onClose := func(r transport.GoAwayReason) { + ac.mu.Lock() + defer ac.mu.Unlock() + // adjust params based on GoAwayReason + ac.adjustParams(r) + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. + return + } + ac.transport = nil + // Refresh the name resolver on any connection loss. + ac.cc.resolveNow(resolver.ResolveNowOptions{}) + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) + } + + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) + defer cancel() + copts.ChannelzParentID = ac.channelzID + + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) + if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } + // newTr is either nil, or closed. + hcancel() + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + return err + } + + ac.mu.Lock() + defer ac.mu.Unlock() + if ctx.Err() != nil { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // + // The error we pass to Close() is immaterial since there are no open + // streams at this point, so no trailers with error details will be sent + // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. 
+ go newTr.Close(transport.ErrConnClosing) + return nil + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) + return nil + } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil +} + +// startHealthCheck starts the health checking stream (RPC) to watch the health +// stats of this connection if health checking is requested and configured. +// +// LB channel health checking is enabled when all requirements below are met: +// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption +// 2. internal.HealthCheckFunc is set by importing the grpc/health package +// 3. a service config with non-empty healthCheckConfig field is provided +// 4. the load balancer requests it +// +// It sets addrConn to READY if the health checking stream is not started. +// +// Caller must hold ac.mu. +func (ac *addrConn) startHealthCheck(ctx context.Context) { + var healthcheckManagingState bool + defer func() { + if !healthcheckManagingState { + ac.updateConnectivityState(connectivity.Ready, nil) + } + }() + + if ac.cc.dopts.disableHealthCheck { + return + } + healthCheckConfig := ac.cc.healthCheckConfig() + if healthCheckConfig == nil { + return + } + if !ac.scopts.HealthCheckEnabled { + return + } + healthCheckFunc := ac.cc.dopts.healthCheckFunc + if healthCheckFunc == nil { + // The health package is not imported to set health check function. + // + // TODO: add a link to the health check doc in the error message. + channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") + return + } + + healthcheckManagingState = true + + // Set up the health check helper functions. + currentTr := ac.transport + newStream := func(method string) (any, error) { + ac.mu.Lock() + if ac.transport != currentTr { + ac.mu.Unlock() + return nil, status.Error(codes.Canceled, "the provided transport is no longer valid to use") + } + ac.mu.Unlock() + return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac) + } + setConnectivityState := func(s connectivity.State, lastErr error) { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.transport != currentTr { + return + } + ac.updateConnectivityState(s, lastErr) + } + // Start the health checking stream. + go func() { + err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) + if err != nil { + if status.Code(err) == codes.Unimplemented { + channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + } else { + channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) + } + } + }() +} + +func (ac *addrConn) resetConnectBackoff() { + ac.mu.Lock() + close(ac.resetBackoff) + ac.backoffIdx = 0 + ac.resetBackoff = make(chan struct{}) + ac.mu.Unlock() +} + +// getReadyTransport returns the transport if ac's state is READY or nil if not. +func (ac *addrConn) getReadyTransport() transport.ClientTransport { + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Ready { + return ac.transport + } + return nil +} + +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. 
If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + +// tearDown starts to tear down the addrConn. +// +// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct +// will leak. In most cases, call cc.removeAddrConn() instead. +func (ac *addrConn) tearDown(err error) { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + curTr := ac.transport + ac.transport = nil + // We have to set the state to Shutdown before anything else to prevent races + // between setting the state and logic that waits on context cancellation / etc. + ac.updateConnectivityState(connectivity.Shutdown, nil) + ac.cancel() + ac.curAddr = resolver.Address{} + + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) + ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancelation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. 
+ curTr.Close(err) + } + } +} + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { + ac.mu.Lock() + addr := ac.curAddr.Addr + ac.mu.Unlock() + return &channelz.ChannelInternalMetric{ + State: ac.getState(), + Target: addr, + CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), + } +} + +func (ac *addrConn) incrCallsStarted() { + atomic.AddInt64(&ac.czData.callsStarted, 1) + atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (ac *addrConn) incrCallsSucceeded() { + atomic.AddInt64(&ac.czData.callsSucceeded, 1) +} + +func (ac *addrConn) incrCallsFailed() { + atomic.AddInt64(&ac.czData.callsFailed, 1) +} + +type retryThrottler struct { + max float64 + thresh float64 + ratio float64 + + mu sync.Mutex + tokens float64 // TODO(dfawley): replace with atomic and remove lock. +} + +// throttle subtracts a retry token from the pool and returns whether a retry +// should be throttled (disallowed) based upon the retry throttling policy in +// the service config. +func (rt *retryThrottler) throttle() bool { + if rt == nil { + return false + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens-- + if rt.tokens < 0 { + rt.tokens = 0 + } + return rt.tokens <= rt.thresh +} + +func (rt *retryThrottler) successfulRPC() { + if rt == nil { + return + } + rt.mu.Lock() + defer rt.mu.Unlock() + rt.tokens += rt.ratio + if rt.tokens > rt.max { + rt.tokens = rt.max + } +} + +type channelzChannel struct { + cc *ClientConn +} + +func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { + return c.cc.channelzMetric() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +// getResolver finds the scheme in the cc's resolvers or the global registry. +// scheme should always be lowercase (typically by virtue of url.Parse() +// performing proper RFC3986 behavior). +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(scheme) +} + +func (cc *ClientConn) updateConnectionError(err error) { + cc.lceMu.Lock() + cc.lastConnectionError = err + cc.lceMu.Unlock() +} + +func (cc *ClientConn) connectionError() error { + cc.lceMu.Lock() + defer cc.lceMu.Unlock() + return cc.lastConnectionError +} + +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. 
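retryThrottler implements the token bucket described by the service config's retryThrottling section: each throttle() call costs one token, each successful RPC refunds token_ratio, and retries are disallowed while tokens <= max/2. A worked sketch of that arithmetic with hypothetical values max_tokens=10, token_ratio=0.1.

package main

import "fmt"

func main() {
	max, ratio := 10.0, 0.1 // hypothetical retryThrottling config values
	tokens, thresh := max, max/2
	for attempt := 1; attempt <= 6; attempt++ {
		tokens-- // each throttle() call costs one token
		if tokens < 0 {
			tokens = 0
		}
		fmt.Printf("attempt %d: tokens=%.1f throttled=%v\n", attempt, tokens, tokens <= thresh)
	}
	tokens += ratio // each successful RPC refunds token_ratio
	if tokens > max {
		tokens = max
	}
	fmt.Printf("after success: tokens=%.1f\n", tokens)
}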
+func (cc *ClientConn) parseTargetAndFindResolver() error { + channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + + var rb resolver.Builder + parsedTarget, err := parseTarget(cc.target) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + } else { + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb != nil { + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil + } + } + + // We are here because the user's dial target did not contain a scheme or + // specified an unregistered scheme. We should fallback to the default + // scheme, except when a custom dialer is specified in which case, we should + // always use passthrough scheme. + defScheme := resolver.GetDefaultScheme() + channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + canonicalTarget := defScheme + ":///" + cc.target + + parsedTarget, err = parseTarget(canonicalTarget) + if err != nil { + channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + return err + } + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + rb = cc.getResolver(parsedTarget.URL.Scheme) + if rb == nil { + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + } + cc.parsedTarget = parsedTarget + cc.resolverBuilder = rb + return nil +} + +// parseTarget uses RFC 3986 semantics to parse the given target into a +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. +func parseTarget(target string) (resolver.Target, error) { + u, err := url.Parse(target) + if err != nil { + return resolver.Target{}, err + } + + return resolver.Target{URL: *u}, nil +} + +func encodeAuthority(authority string) string { + const upperhex = "0123456789ABCDEF" + + // Return for characters that must be escaped as per + // Valid chars are mentioned here: + // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 + shouldEscape := func(c byte) bool { + // Alphanum are always allowed. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '.', '~': // Unreserved characters + return false + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters + return false + case ':', '[', ']', '@': // Authority related delimeters + return false + } + // Everything else must be escaped. + return true + } + + hexCount := 0 + for i := 0; i < len(authority); i++ { + c := authority[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return authority + } + + required := len(authority) + 2*hexCount + t := make([]byte, required) + + j := 0 + // This logic is a barebones version of escape in the go net/url library. + for i := 0; i < len(authority); i++ { + switch c := authority[i]; { + case shouldEscape(c): + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + default: + t[j] = authority[i] + j++ + } + } + return string(t) +} + +// Determine channel authority. The order of precedence is as follows: +// - user specified authority override using `WithAuthority` dial option +// - creds' notion of server name for the authentication handshake +// - endpoint from dial target of the form "scheme://[authority]/endpoint" +// +// Stores the determined authority in `cc.authority`. 
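parseTarget defers entirely to url.Parse, and parseTargetAndFindResolver falls back to defScheme + ":///" + target when the parsed scheme has no registered resolver. A sketch of what url.Parse yields for a few representative targets (the addresses are placeholders): a scheme-less "host:port" target parses with the port as an opaque part, which is exactly the case that triggers the fallback.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, target := range []string{"dns:///example.com:443", "unix:///tmp/grpc.sock", "localhost:50051"} {
		u, err := url.Parse(target)
		if err != nil {
			fmt.Printf("%-24s error: %v\n", target, err)
			continue
		}
		// For "localhost:50051", url.Parse reports scheme "localhost" and
		// opaque "50051"; with no such resolver registered, the channel
		// retries with the default scheme prepended.
		fmt.Printf("%-24s scheme=%q host=%q path=%q opaque=%q\n",
			target, u.Scheme, u.Host, u.Path, u.Opaque)
	}
}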
+// +// Returns a non-nil error if the authority returned by the transport +// credentials do not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) determineAuthority() error { + dopts := cc.dopts + // Historically, we had two options for users to specify the serverName or + // authority for a channel. One was through the transport credentials + // (either in its constructor, or through the OverrideServerName() method). + // The other option (for cases where WithInsecure() dial option was used) + // was to use the WithAuthority() dial option. + // + // A few things have changed since: + // - `insecure` package with an implementation of the `TransportCredentials` + // interface for the insecure case + // - WithAuthority() dial option support for secure credentials + authorityFromCreds := "" + if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" { + authorityFromCreds = creds.Info().ServerName + } + authorityFromDialOption := dopts.authority + if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + } + + endpoint := cc.parsedTarget.Endpoint() + target := cc.target + switch { + case authorityFromDialOption != "": + cc.authority = authorityFromDialOption + case authorityFromCreds != "": + cc.authority = authorityFromCreds + case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): + // TODO: remove when the unix resolver implements optional interface to + // return channel authority. + cc.authority = "localhost" + case strings.HasPrefix(endpoint, ":"): + cc.authority = "localhost" + endpoint + default: + // TODO: Define an optional interface on the resolver builder to return + // the channel authority given the user's dial target. For resolvers + // which don't implement this interface, we will use the endpoint from + // "scheme://authority/endpoint" as the default authority. + // Escape the endpoint to handle use cases where the endpoint + // might not be a valid authority by default. + // For example an endpoint which has multiple paths like + // 'a/b/c', which is not a valid authority by default. + cc.authority = encodeAuthority(endpoint) + } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +} + +// initResolverWrapper creates a ccResolverWrapper, which builds the name +// resolver. This method grabs the lock to assign the newly built resolver +// wrapper to the cc.resolverWrapper field. +func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { + rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: creds, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) + if err != nil { + return fmt.Errorf("failed to build resolver: %v", err) + } + // Resolver implementations may report state update or error inline when + // built (or right after), and this is handled in cc.updateResolverState. 
+ // Also, an error from the resolver might lead to a re-resolution request + // from the balancer, which is handled in resolveNow() where + // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. + cc.mu.Lock() + cc.resolverWrapper = rw + cc.mu.Unlock() + return nil +} diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 0000000000..411e3dfd47 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. +type baseCodec interface { + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v any) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v any) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh new file mode 100644 index 0000000000..4cdc6ba7c0 --- /dev/null +++ b/vendor/google.golang.org/grpc/codegen.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# This script serves as an example to demonstrate how to generate the gRPC-Go +# interface and the related messages from .proto file. +# +# It assumes the installation of i) Google proto buffer compiler at +# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen +# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have +# not, please install them first. +# +# We recommend running this script at $GOPATH/src. +# +# If this is not what you need, feel free to make your own scripts. Again, this +# script is for demonstration purpose. +# +proto=$1 +protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 0000000000..934fac2b09 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,111 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package codes + +import ( + "strconv" + + "google.golang.org/grpc/internal" +) + +func init() { + internal.CanonicalString = canonicalString +} + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} + +func canonicalString(c Code) string { + switch c { + case OK: + return "OK" + case Canceled: + return "CANCELLED" + case Unknown: + return "UNKNOWN" + case InvalidArgument: + return "INVALID_ARGUMENT" + case DeadlineExceeded: + return "DEADLINE_EXCEEDED" + case NotFound: + return "NOT_FOUND" + case AlreadyExists: + return "ALREADY_EXISTS" + case PermissionDenied: + return "PERMISSION_DENIED" + case ResourceExhausted: + return "RESOURCE_EXHAUSTED" + case FailedPrecondition: + return "FAILED_PRECONDITION" + case Aborted: + return "ABORTED" + case OutOfRange: + return "OUT_OF_RANGE" + case Unimplemented: + return "UNIMPLEMENTED" + case Internal: + return "INTERNAL" + case Unavailable: + return "UNAVAILABLE" + case DataLoss: + return "DATA_LOSS" + case Unauthenticated: + return "UNAUTHENTICATED" + default: + return "CODE(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 0000000000..11b106182d --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,244 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" + "strconv" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. 
+ OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + // + // The gRPC framework will generate this error code when cancellation + // is requested. + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // The gRPC framework will generate this error code in the above two + // mentioned cases. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // This error code will not be generated by the gRPC framework. + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + // + // The gRPC framework will generate this error code when the deadline is + // exceeded. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + // + // This error code will not be generated by the gRPC framework. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + // + // This error code will not be generated by the gRPC framework. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + // + // This error code will not be generated by the gRPC core framework, + // but expect authentication middleware to use it. + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + // + // This error code will be generated by the gRPC framework in + // out-of-memory and server overload situations, or when a message is + // larger than the configured maximum size. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. 
E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + // + // This error code will not be generated by the gRPC framework. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will not be generated by the gRPC framework. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + // + // This error code will not be generated by the gRPC framework. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + // + // This error code will be generated by the gRPC framework. Most + // commonly, you will see this error code when a method implementation + // is missing on the server. It can also be generated for unknown + // compression algorithms or a disagreement as to whether an RPC should + // be streaming. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + // + // This error code will be generated by the gRPC framework in several + // internal error conditions. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + // + // This error code will be generated by the gRPC framework during + // abrupt shutdown of a server process or network connection. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + // + // This error code will not be generated by the gRPC framework. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. + // + // The gRPC framework will generate this error code when the + // authentication metadata is invalid or a Credentials callback fails, + // but also expect authentication middleware to generate it. 
+ Unauthenticated Code = 16 + + _maxCode = 17 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= _maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 0000000000..4a89926422 --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,94 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +package connectivity + +import ( + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + logger.Errorf("unknown connectivity state: %d", s) + return "INVALID_STATE" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClientConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// ServingMode indicates the current mode of operation of the server. +// +// Only xDS enabled gRPC servers currently report their serving mode. 
+type ServingMode int + +const ( + // ServingModeStarting indicates that the server is starting up. + ServingModeStarting ServingMode = iota + // ServingModeServing indicates that the server contains all required + // configuration and is serving RPCs. + ServingModeServing + // ServingModeNotServing indicates that the server is not accepting new + // connections. Existing connections will be closed gracefully, allowing + // in-progress RPCs to complete. A server enters this mode when it does not + // contain the required configuration to serve RPCs. + ServingModeNotServing +) + +func (s ServingMode) String() string { + switch s { + case ServingModeStarting: + return "STARTING" + case ServingModeServing: + return "SERVING" + case ServingModeNotServing: + return "NOT_SERVING" + default: + logger.Errorf("unknown serving mode: %d", s) + return "INVALID_MODE" + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 0000000000..5feac3aa0e --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,291 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/attributes" + icredentials "google.golang.org/grpc/internal/credentials" +) + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. 
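+//
+// Editor's note (not upstream text): levels are ordered by increasing
+// protection, so they can be compared directly, which is what
+// CheckSecurityLevel in this package relies on:
+//
+//	if level < PrivacyAndIntegrity {
+//		// The connection is not both private and integrity-protected.
+//	}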
+type SecurityLevel int + +const ( + // InvalidSecurityLevel indicates an invalid security level. + // The zero SecurityLevel value is invalid for backward compatibility. + InvalidSecurityLevel SecurityLevel = iota + // NoSecurity indicates a connection is insecure. + NoSecurity + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. +// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo { + return c +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. +var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the + // corresponding authentication protocol on rawConn for clients. It returns + // the authenticated connection and the corresponding auth information + // about the connection. The auth information should embed CommonAuthInfo + // to return additional information about the credentials. Implementations + // must use the provided context to implement timely cancellation. gRPC + // will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the + // returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. 
+ // Additionally, ClientHandshakeInfo data will be available via the context + // passed to this call. + // + // The second argument to this method is the `:authority` header value used + // while creating new streams on this connection after authentication + // succeeds. Implementations must use this as the server name during the + // authentication handshake. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName specifies the value used for the following: + // - verifying the hostname on the returned certificates + // - as SNI in the client's handshake to support virtual hosting + // - as the value for `:authority` header at stream creation time + // + // Deprecated: use grpc.WithAuthority instead. Will be supported + // throughout 1.x. + OverrideServerName(string) error +} + +// Bundle is a combination of TransportCredentials and PerRPCCredentials. +// +// It also contains a mode switching method, so it can be used as a combination +// of different credential policies. +// +// Bundle cannot be used together with individual TransportCredentials. +// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials. +// +// This API is experimental. +type Bundle interface { + // TransportCredentials returns the transport credentials from the Bundle. + // + // Implementations must return non-nil transport credentials. If transport + // security is not needed by the Bundle, implementations may choose to + // return insecure.NewCredentials(). + TransportCredentials() TransportCredentials + + // PerRPCCredentials returns the per-RPC credentials from the Bundle. + // + // May be nil if per-RPC credentials are not needed. + PerRPCCredentials() PerRPCCredentials + + // NewWithMode should make a copy of Bundle, and switch mode. Modifying the + // existing Bundle may cause races. + // + // NewWithMode returns nil if the requested mode is not supported. + NewWithMode(mode string) (Bundle, error) +} + +// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls. +// +// This API is experimental. +type RequestInfo struct { + // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") + Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo +} + +// RequestInfoFromContext extracts the RequestInfo from the context if it exists. +// +// This API is experimental. +func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { + ri, ok = icredentials.RequestInfoFromContext(ctx).(RequestInfo) + return ri, ok +} + +// ClientHandshakeInfo holds data to be passed to ClientHandshake. 
This makes
+// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
+// balancer etc. Individual credential implementations control the actual
+// format of the data that they are willing to receive.
+//
+// This API is experimental.
+type ClientHandshakeInfo struct {
+	// Attributes contains the attributes for the address. It could be provided
+	// by the gRPC, resolver, balancer etc.
+	Attributes *attributes.Attributes
+}
+
+// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
+// in ctx.
+//
+// This API is experimental.
+func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
+	chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
+	return chi
+}
+
+// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
+// It returns success if 1) the condition is satisfied, 2) the AuthInfo struct does not implement the
+// GetCommonAuthInfo() method, or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3),
+// this is for backward compatibility.
+//
+// This API is experimental.
+func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
+	type internalInfo interface {
+		GetCommonAuthInfo() CommonAuthInfo
+	}
+	if ai == nil {
+		return errors.New("AuthInfo is nil")
+	}
+	if ci, ok := ai.(internalInfo); ok {
+		// Return success if CommonAuthInfo.SecurityLevel has the invalid zero
+		// value (for backward compatibility).
+		if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
+			return nil
+		}
+		if ci.GetCommonAuthInfo().SecurityLevel < level {
+			return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
+		}
+	}
+	// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
+	return nil
+}
+
+// ChannelzSecurityInfo defines the interface that security protocols should implement
+// in order to provide security info to channelz.
+//
+// This API is experimental.
+type ChannelzSecurityInfo interface {
+	GetSecurityValue() ChannelzSecurityValue
+}
+
+// ChannelzSecurityValue defines the interface that the GetSecurityValue() return value
+// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
+// and *OtherChannelzSecurityValue.
+//
+// This API is experimental.
+type ChannelzSecurityValue interface {
+	isChannelzSecurityValue()
+}
+
+// OtherChannelzSecurityValue defines the struct that a non-TLS protocol should return
+// from GetSecurityValue(), which contains protocol-specific security info. Note that
+// the Value field will be sent to users of channelz requesting channel info, so
+// sensitive info should be avoided.
+//
+// This API is experimental.
+type OtherChannelzSecurityValue struct {
+	ChannelzSecurityValue
+	Name  string
+	Value proto.Message
+}
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
new file mode 100644
index 0000000000..82bee1443b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package insecure provides an implementation of the
+// credentials.TransportCredentials interface which disables transport security.
+package insecure
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// NewCredentials returns a credentials which disables transport security.
+//
+// Note that using this credentials with per-RPC credentials which require
+// transport security is incompatible and will cause grpc.Dial() to fail.
+func NewCredentials() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+// insecureTC implements the insecure transport credentials. The handshake
+// methods simply return the passed in net.Conn and set the security level to
+// NoSecurity.
+type insecureTC struct{}
+
+func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) Info() credentials.ProtocolInfo {
+	return credentials.ProtocolInfo{SecurityProtocol: "insecure"}
+}
+
+func (insecureTC) Clone() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+func (insecureTC) OverrideServerName(string) error {
+	return nil
+}
+
+// info contains the auth information for an insecure connection.
+// It implements the AuthInfo interface.
+type info struct {
+	credentials.CommonAuthInfo
+}
+
+// AuthType returns the type of info as a string.
+func (info) AuthType() string {
+	return "insecure"
+}
+
+// insecureBundle implements an insecure bundle.
+// An insecure bundle provides a thin wrapper around insecureTC to support
+// the credentials.Bundle interface.
+type insecureBundle struct{}
+
+// NewBundle returns a bundle with disabled transport security and no per-RPC
+// credentials.
+func NewBundle() credentials.Bundle {
+	return insecureBundle{}
+}
+
+// NewWithMode returns a new insecure Bundle. The mode is ignored.
+func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
+	return insecureBundle{}, nil
+}
+
+// PerRPCCredentials returns a nil implementation, as the insecure bundle does
+// not support per-RPC credentials.
+func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
+	return nil
+}
+
+// TransportCredentials returns the underlying insecure transport credential.
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
+	return NewCredentials()
+}
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
new file mode 100644
index 0000000000..877b7cd21a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -0,0 +1,236 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/url" + "os" + + credinternal "google.golang.org/grpc/internal/credentials" +) + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState + CommonAuthInfo + // This API is experimental. + SPIFFEID *url.URL +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// GetSecurityValue returns security info requested by channelz. +func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { + v := &TLSChannelzSecurityValue{ + StandardName: cipherSuiteLookup[t.State.CipherSuite], + } + // Currently there's no way to get LocalCertificate info from tls package. + if len(t.State.PeerCertificates) > 0 { + v.RemoteCertificate = t.State.PeerCertificates[0].Raw + } + return v +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. +type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := credinternal.CloneTLSConfig(c.config) + if cfg.ServerName == "" { + serverName, _, err := net.SplitHostPort(authority) + if err != nil { + // If the authority had no host port or if the authority cannot be parsed, use it as-is. 
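+			// (Editor's note: e.g. a bare "example.com" carries no port, so
+			// net.SplitHostPort returns an error and the whole authority is
+			// used as the server name.)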
+ serverName = authority + } + cfg.ServerName = serverName + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + close(errChannel) + }() + select { + case err := <-errChannel: + if err != nil { + conn.Close() + return nil, nil, err + } + case <-ctx.Done(): + conn.Close() + return nil, nil, ctx.Err() + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + conn.Close() + return nil, nil, err + } + tlsInfo := TLSInfo{ + State: conn.ConnectionState(), + CommonAuthInfo: CommonAuthInfo{ + SecurityLevel: PrivacyAndIntegrity, + }, + } + id := credinternal.SPIFFEIDFromState(conn.ConnectionState()) + if id != nil { + tlsInfo.SPIFFEID = id + } + return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{credinternal.CloneTLSConfig(c)} + tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the provided root +// certificate authority certificate(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the provided root +// certificate authority certificate file(s) to validate server connections. If +// certificates to establish the identity of the client need to be included in +// the credentials (eg: for mTLS), use NewTLS instead, where a complete +// tls.Config can be specified. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header +// field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := os.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. 
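+//
+// Illustrative usage (editor's sketch; the file names are placeholders):
+//
+//	cert, err := tls.LoadX509KeyPair("server.crt", "server.key")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	srv := grpc.NewServer(grpc.Creds(credentials.NewServerTLSFromCert(&cert)))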
+func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. +func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} + +// TLSChannelzSecurityValue defines the struct that TLS protocol should return +// from GetSecurityValue(), containing security info like cipher and certificate used. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type TLSChannelzSecurityValue struct { + ChannelzSecurityValue + StandardName string + LocalCertificate []byte + RemoteCertificate []byte +} + +var cipherSuiteLookup = map[uint16]string{ + tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", + tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", + tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", + tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", + tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", + tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", + tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go new file mode 100644 index 0000000000..1fd0d5c127 --- /dev/null +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -0,0 +1,715 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/internal" + internalbackoff "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" +) + +func init() { + internal.AddGlobalDialOptions = func(opt ...DialOption) { + globalDialOptions = append(globalDialOptions, opt...) + } + internal.ClearGlobalDialOptions = func() { + globalDialOptions = nil + } + internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption + internal.DisableGlobalDialOptions = newDisableGlobalDialOptions +} + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + + chainUnaryInts []UnaryClientInterceptor + chainStreamInts []StreamClientInterceptor + + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + binaryLogger binarylog.Logger + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier + disableServiceConfig bool + disableRetry bool + disableHealthCheck bool + healthCheckFunc internal.HealthChecker + minConnectTimeout func() time.Duration + defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. + defaultServiceConfigRawJSON *string + resolvers []resolver.Builder + idleTimeout time.Duration + recvBufferPool SharedBufferPool +} + +// DialOption configures how we set up the connection. +type DialOption interface { + apply(*dialOptions) +} + +var globalDialOptions []DialOption + +// EmptyDialOption does not alter the dial configuration. It can be embedded in +// another structure to build custom dial options. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyDialOption struct{} + +func (EmptyDialOption) apply(*dialOptions) {} + +type disableGlobalDialOptions struct{} + +func (disableGlobalDialOptions) apply(*dialOptions) {} + +// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn +// from applying the global DialOptions (set via AddGlobalDialOptions). +func newDisableGlobalDialOptions() DialOption { + return &disableGlobalDialOptions{} +} + +// funcDialOption wraps a function that modifies dialOptions into an +// implementation of the DialOption interface. 
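+//
+// Editor's note (not upstream text): this is the standard functional-options
+// pattern; each exported WithXxx option below reduces to a call like:
+//
+//	func WithUserAgent(s string) DialOption {
+//		return newFuncDialOption(func(o *dialOptions) { o.copts.UserAgent = s })
+//	}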
+type funcDialOption struct {
+	f func(*dialOptions)
+}

+func (fdo *funcDialOption) apply(do *dialOptions) {
+	fdo.f(do)
+}
+
+func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
+	return &funcDialOption{
+		f: f,
+	}
+}
+
+type joinDialOption struct {
+	opts []DialOption
+}
+
+func (jdo *joinDialOption) apply(do *dialOptions) {
+	for _, opt := range jdo.opts {
+		opt.apply(do)
+	}
+}
+
+func newJoinDialOption(opts ...DialOption) DialOption {
+	return &joinDialOption{opts: opts}
+}
+
+// WithSharedWriteBuffer allows reusing the per-connection transport write
+// buffer. If this option is set to true, every connection will release the
+// buffer after flushing the data on the wire.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithSharedWriteBuffer(val bool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.SharedWriteBuffer = val
+	})
+}
+
+// WithWriteBufferSize determines how much data can be batched before doing a
+// write on the wire. The corresponding memory allocation for this buffer will
+// be twice the size to keep syscalls low. The default value for this buffer is
+// 32KB.
+//
+// Zero or negative values will disable the write buffer such that each write
+// will be made on the underlying connection. Note: A Send call may not directly
+// translate to a write.
+func WithWriteBufferSize(s int) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.WriteBufferSize = s
+	})
+}
+
+// WithReadBufferSize lets you set the size of the read buffer; this determines
+// how much data can be read at most for each read syscall.
+//
+// The default value for this buffer is 32KB. Zero or negative values will
+// disable the read buffer for a connection so the data framer can access the
+// underlying conn directly.
+func WithReadBufferSize(s int) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.ReadBufferSize = s
+	})
+}
+
+// WithInitialWindowSize returns a DialOption which sets the value for the
+// initial window size on a stream. The lower bound for window size is 64K and
+// any value smaller than that will be ignored.
+func WithInitialWindowSize(s int32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.InitialWindowSize = s
+	})
+}
+
+// WithInitialConnWindowSize returns a DialOption which sets the value for the
+// initial window size on a connection. The lower bound for window size is 64K
+// and any value smaller than that will be ignored.
+func WithInitialConnWindowSize(s int32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.InitialConnWindowSize = s
+	})
+}
+
+// WithMaxMsgSize returns a DialOption which sets the maximum message size the
+// client can receive.
+//
+// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will
+// be supported throughout 1.x.
+func WithMaxMsgSize(s int) DialOption {
+	return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
+}
+
+// WithDefaultCallOptions returns a DialOption which sets the default
+// CallOptions for calls over the connection.
+func WithDefaultCallOptions(cos ...CallOption) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.callOptions = append(o.callOptions, cos...)
+	})
+}
+
+// WithCodec returns a DialOption which sets a codec for message marshaling and
+// unmarshaling.
+//
+// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be
+// supported throughout 1.x.
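+//
+// For example (editor's sketch), the non-deprecated form is:
+//
+//	grpc.WithDefaultCallOptions(grpc.ForceCodec(myCodec))
+//
+// where myCodec is a placeholder for an encoding.Codec implementation.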
+func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by the +// UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. Will be supported throughout 1.x. +func WithCompressor(cp Compressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.cp = cp + }) +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func WithDecompressor(dc Decompressor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.dc = dc + }) +} + +// WithServiceConfig returns a DialOption which has a channel to read the +// service configuration. +// +// Deprecated: service config should be received through name resolver or via +// WithDefaultServiceConfig, as specified at +// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be +// removed in a future 1.x release. +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.scChan = c + }) +} + +// WithConnectParams configures the ClientConn to use the provided ConnectParams +// for creating and maintaining connections to servers. +// +// The backoff configuration specified as part of the ConnectParams overrides +// all defaults specified in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider +// using the backoff.DefaultConfig as a base, in cases where you want to +// override only a subset of the backoff configuration. +func WithConnectParams(p ConnectParams) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = internalbackoff.Exponential{Config: p.Backoff} + o.minConnectTimeout = func() time.Duration { + return p.MinConnectTimeout + } + }) +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x. +func WithBackoffConfig(b BackoffConfig) DialOption { + bc := backoff.DefaultConfig + bc.MaxDelay = b.MaxDelay + return withBackoff(internalbackoff.Exponential{Config: bc}) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a failed +// connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs internalbackoff.Strategy) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.bs = bs + }) +} + +// WithBlock returns a DialOption which makes callers of Dial block until the +// underlying connection is up. 
Without this, Dial returns immediately and +// connecting the server happens in background. +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +func WithBlock() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + }) +} + +// WithReturnConnectionError returns a DialOption which makes the client connection +// return a string containing both the last connection error that occurred and +// the context.DeadlineExceeded error. +// Implies WithBlock() +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithReturnConnectionError() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.block = true + o.returnLastError = true + }) +} + +// WithInsecure returns a DialOption which disables transport security for this +// ClientConn. Under the hood, it uses insecure.NewCredentials(). +// +// Note that using this DialOption with per-RPC credentials (through +// WithCredentialsBundle or WithPerRPCCredentials) which require transport +// security is incompatible and will cause grpc.Dial() to fail. +// +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. +func WithInsecure() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = insecure.NewCredentials() + }) +} + +// WithNoProxy returns a DialOption which disables the use of proxies for this +// ClientConn. This is ignored if WithDialer or WithContextDialer are used. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithNoProxy() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UseProxy = false + }) +} + +// WithTransportCredentials returns a DialOption which configures a connection +// level security credentials (e.g., TLS/SSL). This should not be used together +// with WithCredentialsBundle. +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.TransportCredentials = creds + }) +} + +// WithPerRPCCredentials returns a DialOption which sets credentials and places +// auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + }) +} + +// WithCredentialsBundle returns a DialOption to set a credentials bundle for +// the ClientConn.WithCreds. This should not be used together with +// WithTransportCredentials. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithCredentialsBundle(b credentials.Bundle) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.CredsBundle = b + }) +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a +// ClientConn initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext instead of Dial and context.WithTimeout +// instead. Will be supported throughout 1.x. 
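+//
+// Editor's sketch of the recommended replacement ("target" is a placeholder):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	conn, err := grpc.DialContext(ctx, target, grpc.WithBlock())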
+func WithTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.timeout = d + }) +} + +// WithContextDialer returns a DialOption that sets a dialer to create +// connections. If FailOnNonTempDialError() is set to true, and an error is +// returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.Dialer = f + }) +} + +func init() { + internal.WithHealthCheckFunc = withHealthCheckFunc +} + +// WithDialer returns a DialOption that specifies a function to use for dialing +// network addresses. If FailOnNonTempDialError() is set to true, and an error +// is returned by f, gRPC checks the error's Temporary() method to decide if it +// should try to reconnect to the network address. +// +// Deprecated: use WithContextDialer instead. Will be supported throughout +// 1.x. +func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return WithContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, time.Until(deadline)) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler for +// all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return newFuncDialOption(func(o *dialOptions) { + if h == nil { + logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.copts.StatsHandlers = append(o.copts.StatsHandlers, h) + }) +} + +// withBinaryLogger returns a DialOption that specifies the binary logger for +// this ClientConn. +func withBinaryLogger(bl binarylog.Logger) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.binaryLogger = bl + }) +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on +// non-temporary dial errors. If f is true, and dialer returns a non-temporary +// error, gRPC will fail the connection to the network address and won't try to +// reconnect. The default value of FailOnNonTempDialError is false. +// +// FailOnNonTempDialError only affects the initial dial, and does not do +// anything useful unless you are also using WithBlock(). +// +// Use of this feature is not recommended. For more information, please see: +// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func FailOnNonTempDialError(f bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + }) +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all +// the RPCs. +func WithUserAgent(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.UserAgent = s + }) +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters +// for the client transport. 
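+//
+// Illustrative values (editor's sketch, not tuning advice):
+//
+//	grpc.WithKeepaliveParams(keepalive.ClientParameters{
+//		Time:                30 * time.Second, // ping after 30s without activity
+//		Timeout:             10 * time.Second, // wait 10s for the ping ack
+//		PermitWithoutStream: true,
+//	})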
+func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + if kp.Time < internal.KeepaliveMinPingTime { + logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime) + kp.Time = internal.KeepaliveMinPingTime + } + return newFuncDialOption(func(o *dialOptions) { + o.copts.KeepaliveParams = kp + }) +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for +// unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.unaryInt = f + }) +} + +// WithChainUnaryInterceptor returns a DialOption that specifies the chained +// interceptor for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithUnaryInterceptor will always be prepended to the chain. +func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for +// streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.streamInt = f + }) +} + +// WithChainStreamInterceptor returns a DialOption that specifies the chained +// interceptor for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All interceptors added by this method will be chained, and the interceptor +// defined by WithStreamInterceptor will always be prepended to the chain. +func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// WithAuthority returns a DialOption that specifies the value to be used as the +// :authority pseudo-header and as the server name in authentication handshake. +func WithAuthority(a string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.authority = a + }) +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of +// current ClientConn's parent. This function is used in nested channel creation +// (e.g. grpclb dial). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithChannelzParentID(id *channelz.Identifier) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.channelzParentID = id + }) +} + +// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +// +// Note that this dial option only disables service config from resolver. If +// default service config is provided, gRPC will use the default service config. +func WithDisableServiceConfig() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.disableServiceConfig = true + }) +} + +// WithDefaultServiceConfig returns a DialOption that configures the default +// service config, which will be used in cases where: +// +// 1. WithDisableServiceConfig is also used, or +// +// 2. 
+//    invalid service config.
+//
+// The parameter s is the JSON representation of the default service config.
+// For more information about service configs, see:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+// For a simple example of usage, see:
+// examples/features/load_balancing/client/main.go
+func WithDefaultServiceConfig(s string) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.defaultServiceConfigRawJSON = &s
+	})
+}
+
+// WithDisableRetry returns a DialOption that disables retries, even if the
+// service config enables them. This does not impact transparent retries, which
+// will happen automatically if no data is written to the wire or if the RPC is
+// unprocessed by the remote server.
+func WithDisableRetry() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableRetry = true
+	})
+}
+
+// WithMaxHeaderListSize returns a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+func WithMaxHeaderListSize(s uint32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.MaxHeaderListSize = &s
+	})
+}
+
+// WithDisableHealthCheck disables the LB channel health checking for all
+// SubConns of this ClientConn.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithDisableHealthCheck() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.disableHealthCheck = true
+	})
+}
+
+// withHealthCheckFunc replaces the default health check function with the
+// provided one. It makes tests easier to change the health check function.
+//
+// For testing purpose only.
+func withHealthCheckFunc(f internal.HealthChecker) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.healthCheckFunc = f
+	})
+}
+
+func defaultDialOptions() dialOptions {
+	return dialOptions{
+		healthCheckFunc: internal.HealthCheckFunc,
+		copts: transport.ConnectOptions{
+			WriteBufferSize: defaultWriteBufSize,
+			ReadBufferSize:  defaultReadBufSize,
+			UseProxy:        true,
+		},
+		recvBufferPool: nopBufferPool{},
+	}
+}
+
+// withMinConnectDeadline specifies the function that clientconn uses to
+// get minConnectDeadline. This can be used to make connection attempts happen
+// faster/slower.
+//
+// For testing purpose only.
+func withMinConnectDeadline(f func() time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.minConnectTimeout = f
+	})
+}
+
+// WithResolvers allows a list of resolver implementations to be registered
+// locally with the ClientConn without needing to be globally registered via
+// resolver.Register. They will be matched against the scheme used for the
+// current Dial only, and will take precedence over the global registry.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithResolvers(rs ...resolver.Builder) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolvers = append(o.resolvers, rs...)
+	})
+}
+
+// WithIdleTimeout returns a DialOption that configures an idle timeout for the
+// channel. If the channel is idle for the configured timeout, i.e. there are no
+// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
+// and as a result the name resolver and load balancer will be shut down. The
+// channel will exit idle mode when the Connect() method is called or when an
+// RPC is initiated.
+//
+// By default this feature is disabled, which can also be explicitly configured
+// by passing zero to this function.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithIdleTimeout(d time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.idleTimeout = d
+	})
+}
+
+// WithRecvBufferPool returns a DialOption that configures the ClientConn
+// to use the provided shared buffer pool for parsing incoming messages. Depending
+// on the application's workload, this could result in reduced memory allocation.
+//
+// If you are unsure about how to implement a memory pool but want to utilize one,
+// begin with grpc.NewSharedBufferPool.
+//
+// Note: The shared buffer pool feature will not be active if any of the following
+// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
+// cases, the shared buffer pool will be ignored.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.recvBufferPool = bufferPool
+	})
+}
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
new file mode 100644
index 0000000000..0022859ad7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+//go:generate ./regenerate.sh
+
+/*
+Package grpc implements an RPC system called gRPC.
+
+See grpc.io for more information about gRPC.
+*/
+package grpc // import "google.golang.org/grpc"
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
new file mode 100644
index 0000000000..69d5580b6a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -0,0 +1,135 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package encoding defines the interface for the compressor and codec, and
+// functions to register and retrieve compressors and codecs.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
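+//
+// As a hedged sketch of the registration flow (jsonCodec is a hypothetical
+// type implementing Codec): codecs are registered from an init function and
+// selected per RPC by content-subtype:
+//
+//	func init() { encoding.RegisterCodec(jsonCodec{}) }
+//
+//	// Client side, per call:
+//	err := cc.Invoke(ctx, method, req, reply, grpc.CallContentSubtype("json"))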
+package encoding
+
+import (
+	"io"
+	"strings"
+
+	"google.golang.org/grpc/internal/grpcutil"
+)
+
+// Identity specifies the optional encoding for uncompressed streams.
+// It is intended for grpc internal use only.
+const Identity = "identity"
+
+// Compressor is used for compressing and decompressing when sending or
+// receiving messages.
+type Compressor interface {
+	// Compress writes the data written to wc to w after compressing it. If an
+	// error occurs while initializing the compressor, that error is returned
+	// instead.
+	Compress(w io.Writer) (io.WriteCloser, error)
+	// Decompress reads data from r, decompresses it, and provides the
+	// uncompressed data via the returned io.Reader. If an error occurs while
+	// initializing the decompressor, that error is returned instead.
+	Decompress(r io.Reader) (io.Reader, error)
+	// Name is the name of the compression codec and is used to set the content
+	// coding header. The result must be static; the result cannot change
+	// between calls.
+	Name() string
+	// If a Compressor implements
+	// DecompressedSize(compressedBytes []byte) int, gRPC will call it
+	// to determine the size of the buffer allocated for the result of decompression.
+	// Return -1 to indicate unknown size.
+	//
+	// Experimental
+	//
+	// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+	// later release.
+}
+
+var registeredCompressor = make(map[string]Compressor)
+
+// RegisterCompressor registers the compressor with gRPC by its name. It can
+// be activated when sending an RPC via grpc.UseCompressor(). It will be
+// automatically accessed when receiving a message based on the content coding
+// header. Servers also use it to send a response with the same encoding as
+// the request.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Compressors are
+// registered with the same name, the one registered last will take effect.
+func RegisterCompressor(c Compressor) {
+	registeredCompressor[c.Name()] = c
+	if !grpcutil.IsCompressorNameRegistered(c.Name()) {
+		grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
+	}
+}
+
+// GetCompressor returns Compressor for the given compressor name.
+func GetCompressor(name string) Compressor {
+	return registeredCompressor[name]
+}
+
+// Codec defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a Codec's
+// methods can be called from concurrent goroutines.
+type Codec interface {
+	// Marshal returns the wire format of v.
+	Marshal(v any) ([]byte, error)
+	// Unmarshal parses the wire format into v.
+	Unmarshal(data []byte, v any) error
+	// Name returns the name of the Codec implementation. The returned string
+	// will be used as part of content type in transmission. The result must be
+	// static; the result cannot change between calls.
+	Name() string
+}
+
+var registeredCodecs = make(map[string]Codec)
+
+// RegisterCodec registers the provided Codec for use with all gRPC clients and
+// servers.
+//
+// The Codec will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the Codec. This
+// is case-insensitive, and is stored and looked up as lowercase. If the
+// result of calling Name() is an empty string, RegisterCodec will panic. See
See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Codecs are +// registered with the same name, the one registered last will take effect. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + if codec.Name() == "" { + panic("cannot register Codec with empty string result for Name()") + } + contentSubtype := strings.ToLower(codec.Name()) + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 0000000000..0ee3d3bae9 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. +type codec struct{} + +func (codec) Marshal(v any) ([]byte, error) { + vv, ok := v.(proto.Message) + if !ok { + return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) + } + return proto.Marshal(vv) +} + +func (codec) Unmarshal(data []byte, v any) error { + vv, ok := v.(proto.Message) + if !ok { + return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) + } + return proto.Unmarshal(data, vv) +} + +func (codec) Name() string { + return Name +} diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go new file mode 100644 index 0000000000..ac73c9ced2 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -0,0 +1,117 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+// componentData records the settings for a component.
+type componentData struct {
+	name string
+}
+
+var cache = map[string]*componentData{}
+
+func (c *componentData) InfoDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	grpclog.InfoDepth(depth+1, args...)
+}
+
+func (c *componentData) WarningDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	grpclog.WarningDepth(depth+1, args...)
+}
+
+func (c *componentData) ErrorDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	grpclog.ErrorDepth(depth+1, args...)
+}
+
+func (c *componentData) FatalDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	grpclog.FatalDepth(depth+1, args...)
+}
+
+func (c *componentData) Info(args ...any) {
+	c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warning(args ...any) {
+	c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Error(args ...any) {
+	c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatal(args ...any) {
+	c.FatalDepth(1, args...)
+}
+
+func (c *componentData) Infof(format string, args ...any) {
+	c.InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Warningf(format string, args ...any) {
+	c.WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Errorf(format string, args ...any) {
+	c.ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Fatalf(format string, args ...any) {
+	c.FatalDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Infoln(args ...any) {
+	c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warningln(args ...any) {
+	c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Errorln(args ...any) {
+	c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatalln(args ...any) {
+	c.FatalDepth(1, args...)
+}
+
+func (c *componentData) V(l int) bool {
+	return V(l)
+}
+
+// Component creates a new component and returns it for logging. If a component
+// with the name already exists, nothing will be created and it will be
+// returned. SetLoggerV2 will panic if it is called with a logger created by
+// Component.
+func Component(componentName string) DepthLoggerV2 {
+	if cData, ok := cache[componentName]; ok {
+		return cData
+	}
+	c := &componentData{componentName}
+	cache[componentName] = c
+	return c
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
new file mode 100644
index 0000000000..16928c9cb9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -0,0 +1,132 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog defines logging for grpc.
+//
+// All logs in transport and grpclb packages only go to verbose level 2.
+// All logs in other packages in grpc are logged in spite of the verbosity level.
+//
+// In the default logger,
+// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
+// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog // import "google.golang.org/grpc/grpclog"
+
+import (
+	"os"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+func init() {
+	SetLoggerV2(newLoggerV2())
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func V(l int) bool {
+	return grpclog.Logger.V(l)
+}
+
+// Info logs to the INFO log.
+func Info(args ...any) {
+	grpclog.Logger.Info(args...)
+}
+
+// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
+func Infof(format string, args ...any) {
+	grpclog.Logger.Infof(format, args...)
+}
+
+// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
+func Infoln(args ...any) {
+	grpclog.Logger.Infoln(args...)
+}
+
+// Warning logs to the WARNING log.
+func Warning(args ...any) {
+	grpclog.Logger.Warning(args...)
+}
+
+// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
+func Warningf(format string, args ...any) {
+	grpclog.Logger.Warningf(format, args...)
+}
+
+// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
+func Warningln(args ...any) {
+	grpclog.Logger.Warningln(args...)
+}
+
+// Error logs to the ERROR log.
+func Error(args ...any) {
+	grpclog.Logger.Error(args...)
+}
+
+// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
+func Errorf(format string, args ...any) {
+	grpclog.Logger.Errorf(format, args...)
+}
+
+// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
+func Errorln(args ...any) {
+	grpclog.Logger.Errorln(args...)
+}
+
+// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
+// It calls os.Exit() with exit code 1.
+func Fatal(args ...any) {
+	grpclog.Logger.Fatal(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
+// It calls os.Exit() with exit code 1.
+func Fatalf(format string, args ...any) {
+	grpclog.Logger.Fatalf(format, args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...any) {
+	grpclog.Logger.Fatalln(args...)
+	// Make sure fatal logs will exit.
+	os.Exit(1)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+//
+// Deprecated: use Info.
+func Print(args ...any) {
+	grpclog.Logger.Info(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+//
+// Deprecated: use Infof.
+func Printf(format string, args ...any) {
+	grpclog.Logger.Infof(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+//
+// Deprecated: use Infoln.
+func Println(args ...any) {
+	grpclog.Logger.Infoln(args...)
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 0000000000..b1674d8267
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import "google.golang.org/grpc/internal/grpclog"
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Fatalln(args ...any)
+	Print(args ...any)
+	Printf(format string, args ...any)
+	Println(args ...any)
+}
+
+// SetLogger sets the logger that is used in grpc. Call only from
+// init() functions.
+//
+// Deprecated: use SetLoggerV2.
+func SetLogger(l Logger) {
+	grpclog.Logger = &loggerWrapper{Logger: l}
+}
+
+// loggerWrapper wraps Logger into a LoggerV2.
+type loggerWrapper struct {
+	Logger
+}
+
+func (g *loggerWrapper) Info(args ...any) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Infoln(args ...any) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Infof(format string, args ...any) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Warning(args ...any) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Warningln(args ...any) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Warningf(format string, args ...any) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) Error(args ...any) {
+	g.Logger.Print(args...)
+}
+
+func (g *loggerWrapper) Errorln(args ...any) {
+	g.Logger.Println(args...)
+}
+
+func (g *loggerWrapper) Errorf(format string, args ...any) {
+	g.Logger.Printf(format, args...)
+}
+
+func (g *loggerWrapper) V(l int) bool {
+	// Returns true for all verbose levels.
+	return true
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
new file mode 100644
index 0000000000..ecfd36d713
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -0,0 +1,258 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"strconv"
+	"strings"
+
+	"google.golang.org/grpc/internal/grpclog"
+)
+
+// LoggerV2 does underlying logging work for grpclog.
+type LoggerV2 interface {
+	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+	Info(args ...any)
+	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+	Infoln(args ...any)
+	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+	Infof(format string, args ...any)
+	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+	Warning(args ...any)
+	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+	Warningln(args ...any)
+	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+	Warningf(format string, args ...any)
+	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	Error(args ...any)
+	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	Errorln(args ...any)
+	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	Errorf(format string, args ...any)
+	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatal(args ...any)
+	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalln(args ...any)
+	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalf(format string, args ...any)
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l int) bool
+}
+
+// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
+// Not mutex-protected, should be called before any gRPC functions.
+func SetLoggerV2(l LoggerV2) {
+	if _, ok := l.(*componentData); ok {
+		panic("cannot use component logger as grpclog logger")
+	}
+	grpclog.Logger = l
+	grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
+}
+
+const (
+	// infoLog indicates Info severity.
+	infoLog int = iota
+	// warningLog indicates Warning severity.
+	warningLog
+	// errorLog indicates Error severity.
+	errorLog
+	// fatalLog indicates Fatal severity.
+	fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+	infoLog:    "INFO",
+	warningLog: "WARNING",
+	errorLog:   "ERROR",
+	fatalLog:   "FATAL",
+}
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+	m          []*log.Logger
+	v          int
+	jsonFormat bool
+}
+
+// NewLoggerV2 creates a loggerV2 with the provided writers.
+// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1).
+// Error logs will be written to errorW, warningW and infoW.
+// Warning logs will be written to warningW and infoW.
+// Info logs will be written to infoW.
+func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
+	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
+}
+
+// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
+// verbosity level.
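+//
+// Illustrative sketch, assuming all severities should go to os.Stderr with
+// verbosity 2 (SetLoggerV2 must run before any gRPC function is called):
+//
+//	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))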
+func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
+	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
+}
+
+type loggerV2Config struct {
+	verbose    int
+	jsonFormat bool
+}
+
+func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
+	var m []*log.Logger
+	flag := log.LstdFlags
+	if c.jsonFormat {
+		flag = 0
+	}
+	m = append(m, log.New(infoW, "", flag))
+	m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
+	ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
+	m = append(m, log.New(ew, "", flag))
+	m = append(m, log.New(ew, "", flag))
+	return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
+}
+
+// newLoggerV2 creates a loggerV2 to be used as default logger.
+// All logs are written to stderr.
+func newLoggerV2() LoggerV2 {
+	errorW := io.Discard
+	warningW := io.Discard
+	infoW := io.Discard
+
+	logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
+	switch logLevel {
+	case "", "ERROR", "error": // If env is unset, set level to ERROR.
+		errorW = os.Stderr
+	case "WARNING", "warning":
+		warningW = os.Stderr
+	case "INFO", "info":
+		infoW = os.Stderr
+	}
+
+	var v int
+	vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
+	if vl, err := strconv.Atoi(vLevel); err == nil {
+		v = vl
+	}
+
+	jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
+
+	return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
+		verbose:    v,
+		jsonFormat: jsonFormat,
+	})
+}
+
+func (g *loggerT) output(severity int, s string) {
+	sevStr := severityName[severity]
+	if !g.jsonFormat {
+		g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
+		return
+	}
+	// TODO: we can also include the logging component, but that needs more
+	// (API) changes.
+	b, _ := json.Marshal(map[string]string{
+		"severity": sevStr,
+		"message":  s,
+	})
+	g.m[severity].Output(2, string(b))
+}
+
+func (g *loggerT) Info(args ...any) {
+	g.output(infoLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Infoln(args ...any) {
+	g.output(infoLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Infof(format string, args ...any) {
+	g.output(infoLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Warning(args ...any) {
+	g.output(warningLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Warningln(args ...any) {
+	g.output(warningLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Warningf(format string, args ...any) {
+	g.output(warningLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Error(args ...any) {
+	g.output(errorLog, fmt.Sprint(args...))
+}
+
+func (g *loggerT) Errorln(args ...any) {
+	g.output(errorLog, fmt.Sprintln(args...))
+}
+
+func (g *loggerT) Errorf(format string, args ...any) {
+	g.output(errorLog, fmt.Sprintf(format, args...))
+}
+
+func (g *loggerT) Fatal(args ...any) {
+	g.output(fatalLog, fmt.Sprint(args...))
+	os.Exit(1)
+}
+
+func (g *loggerT) Fatalln(args ...any) {
+	g.output(fatalLog, fmt.Sprintln(args...))
+	os.Exit(1)
+}
+
+func (g *loggerT) Fatalf(format string, args ...any) {
+	g.output(fatalLog, fmt.Sprintf(format, args...))
+	os.Exit(1)
+}
+
+func (g *loggerT) V(l int) bool {
+	return l <= g.v
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type DepthLoggerV2 interface {
	LoggerV2
	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
	InfoDepth(depth int, args ...any)
	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
	WarningDepth(depth int, args ...any)
	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
	ErrorDepth(depth int, args ...any)
	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
	FatalDepth(depth int, args ...any)
}
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
new file mode 100644
index 0000000000..877d78fc3d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -0,0 +1,104 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+)
+
+// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
+type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error
+
+// UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
+// Unary interceptors can be specified as a DialOption, using
+// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a
+// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC
+// delegates all unary RPC invocations to the interceptor, and it is the
+// responsibility of the interceptor to call invoker to complete the processing
+// of the RPC.
+//
+// method is the RPC name. req and reply are the corresponding request and
+// response messages. cc is the ClientConn on which the RPC was invoked. invoker
+// is the handler to complete the RPC and it is the responsibility of the
+// interceptor to call it. opts contain all applicable call options, including
+// defaults from the ClientConn as well as per-call options.
+//
+// The returned error must be compatible with the status package.
+type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
+
+// Streamer is called by StreamClientInterceptor to create a ClientStream.
+type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
+
+// StreamClientInterceptor intercepts the creation of a ClientStream. Stream
+// interceptors can be specified as a DialOption, using WithStreamInterceptor()
+// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream
+// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations
+// to the interceptor, and it is the responsibility of the interceptor to call
+// streamer.
+//
+// desc contains a description of the stream. cc is the ClientConn on which the
+// RPC was invoked. streamer is the handler to create a ClientStream and it is
+// the responsibility of the interceptor to call it. opts contain all applicable
+// call options, including defaults from the ClientConn as well as per-call
+// options.
+//
+// StreamClientInterceptor may return a custom ClientStream to intercept all I/O
+// operations. The returned error must be compatible with the status package.
+type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
+
+// UnaryServerInfo consists of various information about a unary RPC on
+// server side. All per-rpc information may be mutated by the interceptor.
+type UnaryServerInfo struct {
+	// Server is the service implementation the user provides. This is read-only.
+	Server any
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+}
+
+// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
+// execution of a unary RPC.
+//
+// If a UnaryHandler returns an error, it should either be produced by the
+// status package, or be one of the context errors. Otherwise, gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message of the
+// RPC.
+type UnaryHandler func(ctx context.Context, req any) (any, error)
+
+// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
+// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
+// of the service method implementation. It is the responsibility of the interceptor to invoke handler
+// to complete the RPC.
+type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error)
+
+// StreamServerInfo consists of various information about a streaming RPC on
+// server side. All per-rpc information may be mutated by the interceptor.
+type StreamServerInfo struct {
+	// FullMethod is the full RPC method string, i.e., /package.service/method.
+	FullMethod string
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
+// info contains all the information of this RPC the interceptor can operate on. And handler is the
+// service method implementation. It is the responsibility of the interceptor to invoke handler to
+// complete the RPC.
+type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
new file mode 100644
index 0000000000..5fc0ee3da5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package backoff implements the backoff strategy for gRPC.
+//
+// This is kept in internal until the gRPC project decides whether or not to
+// allow alternative backoff strategies.
+package backoff
+
+import (
+	"time"
+
+	grpcbackoff "google.golang.org/grpc/backoff"
+	"google.golang.org/grpc/internal/grpcrand"
+)
+
+// Strategy defines the methodology for backing off after a grpc connection
+// failure.
+type Strategy interface {
+	// Backoff returns the amount of time to wait before the next retry given
+	// the number of consecutive failures.
+	Backoff(retries int) time.Duration
+}
+
+// DefaultExponential is an exponential backoff implementation using the
+// default values for all the configurable knobs defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
+
+// Exponential implements exponential backoff algorithm as defined in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+type Exponential struct {
+	// Config contains all options to configure the backoff algorithm.
+	Config grpcbackoff.Config
+}
+
+// Backoff returns the amount of time to wait before the next retry given the
+// number of retries.
+func (bc Exponential) Backoff(retries int) time.Duration {
+	if retries == 0 {
+		return bc.Config.BaseDelay
+	}
+	backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
+	for backoff < max && retries > 0 {
+		backoff *= bc.Config.Multiplier
+		retries--
+	}
+	if backoff > max {
+		backoff = max
+	}
+	// Randomize backoff delays so that if a cluster of requests start at
+	// the same time, they won't operate in lockstep.
+	backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
+	if backoff < 0 {
+		return 0
+	}
+	return time.Duration(backoff)
+}
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
new file mode 100644
index 0000000000..3c594e6e4e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -0,0 +1,385 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package gracefulswitch implements a graceful switch load balancer.
+package gracefulswitch
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/resolver"
+)
+
+var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
+var _ balancer.Balancer = (*Balancer)(nil)
+
+// NewBalancer returns a graceful switch Balancer.
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
+	return &Balancer{
+		cc:    cc,
+		bOpts: opts,
+	}
+}
+
+// Balancer is a utility to gracefully switch from one balancer to
+// a new balancer. It implements the balancer.Balancer interface.
+type Balancer struct {
+	bOpts balancer.BuildOptions
+	cc    balancer.ClientConn
+
+	// mu protects the following fields and all fields within balancerCurrent
+	// and balancerPending. mu does not need to be held when calling into the
+	// child balancers, as all calls into these children happen only as a direct
+	// result of a call into the gracefulSwitchBalancer, which are also
+	// guaranteed to be synchronous. There is one exception: an UpdateState call
+	// from a child balancer when current and pending are populated can lead to
+	// calling Close() on the current. To prevent that racing with an
+	// UpdateSubConnState from the channel, we hold currentMu during Close and
+	// UpdateSubConnState calls.
+	mu              sync.Mutex
+	balancerCurrent *balancerWrapper
+	balancerPending *balancerWrapper
+	closed          bool // set to true when this balancer is closed
+
+	// currentMu must be locked before mu. This mutex guards against this
+	// sequence of events: UpdateSubConnState() called, finds the
+	// balancerCurrent, gives up lock, updateState comes in, causes Close() on
+	// balancerCurrent before the UpdateSubConnState is called on the
+	// balancerCurrent.
+	currentMu sync.Mutex
+}
+
+// swap swaps out the current lb with the pending lb and updates the ClientConn.
+// The caller must hold gsb.mu.
+func (gsb *Balancer) swap() {
+	gsb.cc.UpdateState(gsb.balancerPending.lastState)
+	cur := gsb.balancerCurrent
+	gsb.balancerCurrent = gsb.balancerPending
+	gsb.balancerPending = nil
+	go func() {
+		gsb.currentMu.Lock()
+		defer gsb.currentMu.Unlock()
+		cur.Close()
+	}()
+}
+
+// Helper function that checks if the balancer passed in is current or pending.
+// The caller must hold gsb.mu.
+func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
+	return bw == gsb.balancerCurrent || bw == gsb.balancerPending
+}
+
+// SwitchTo initializes the graceful switch process, which completes based on
+// connectivity state changes on the current/pending balancer. Thus, the switch
+// process is not complete when this method returns. This method must be called
+// synchronously alongside the rest of the balancer.Balancer methods this
+// Graceful Switch Balancer implements.
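+//
+// A hedged usage sketch from a hypothetical parent balancer, assuming builder
+// was resolved for the newly configured policy:
+//
+//	if err := gsb.SwitchTo(builder); err != nil {
+//		return err // the graceful switch balancer was already closed
+//	}
+//	return gsb.UpdateClientConnState(state)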
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
+	gsb.mu.Lock()
+	if gsb.closed {
+		gsb.mu.Unlock()
+		return errBalancerClosed
+	}
+	bw := &balancerWrapper{
+		gsb: gsb,
+		lastState: balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
+		},
+		subconns: make(map[balancer.SubConn]bool),
+	}
+	balToClose := gsb.balancerPending // nil if there is no pending balancer
+	if gsb.balancerCurrent == nil {
+		gsb.balancerCurrent = bw
+	} else {
+		gsb.balancerPending = bw
+	}
+	gsb.mu.Unlock()
+	balToClose.Close()
+	// This function takes a builder instead of a balancer because builder.Build
+	// can call back inline, and this utility needs to handle the callbacks.
+	newBalancer := builder.Build(bw, gsb.bOpts)
+	if newBalancer == nil {
+		// This is illegal and should never happen; we clear the balancerWrapper
+		// we were constructing if it happens to avoid a potential panic.
+		gsb.mu.Lock()
+		if gsb.balancerPending != nil {
+			gsb.balancerPending = nil
+		} else {
+			gsb.balancerCurrent = nil
+		}
+		gsb.mu.Unlock()
+		return balancer.ErrBadResolverState
+	}
+
+	// This write doesn't need to take gsb.mu because this field never gets read
+	// or written to on any calls from the current or pending. Calls from grpc
+	// to this balancer are guaranteed to be called synchronously, so this
+	// bw.Balancer field will never be forwarded to until this SwitchTo()
+	// function returns.
+	bw.Balancer = newBalancer
+	return nil
+}
+
+// Returns nil if the graceful switch balancer is closed.
+func (gsb *Balancer) latestBalancer() *balancerWrapper {
+	gsb.mu.Lock()
+	defer gsb.mu.Unlock()
+	if gsb.balancerPending != nil {
+		return gsb.balancerPending
+	}
+	return gsb.balancerCurrent
+}
+
+// UpdateClientConnState forwards the update to the latest balancer created.
+func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	// The resolver data is only relevant to the most recent LB Policy.
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		return errBalancerClosed
+	}
+	// Perform this call without gsb.mu to prevent deadlocks if the child calls
+	// back into the channel. The latest balancer can never be closed during a
+	// call from the channel, even without gsb.mu held.
+	return balToUpdate.UpdateClientConnState(state)
+}
+
+// ResolverError forwards the error to the latest balancer created.
+func (gsb *Balancer) ResolverError(err error) {
+	// The resolver data is only relevant to the most recent LB Policy.
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		return
+	}
+	// Perform this call without gsb.mu to prevent deadlocks if the child calls
+	// back into the channel. The latest balancer can never be closed during a
+	// call from the channel, even without gsb.mu held.
+	balToUpdate.ResolverError(err)
+}
+
+// ExitIdle forwards the call to the latest balancer created.
+//
+// If the latest balancer does not support ExitIdle, the subConns are
+// re-connected to manually.
+func (gsb *Balancer) ExitIdle() {
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as the write to the
+	// Balancer field happens in SwitchTo, which completes before this can be
+	// called.
+	if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok {
+		ei.ExitIdle()
+		return
+	}
+	gsb.mu.Lock()
+	defer gsb.mu.Unlock()
+	for sc := range balToUpdate.subconns {
+		sc.Connect()
+	}
+}
+
+// updateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) {
+	gsb.currentMu.Lock()
+	defer gsb.currentMu.Unlock()
+	gsb.mu.Lock()
+	// Forward update to the appropriate child. Even if there is a pending
+	// balancer, the current balancer should continue to get SubConn updates to
+	// maintain the proper state while the pending is still connecting.
+	var balToUpdate *balancerWrapper
+	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
+		balToUpdate = gsb.balancerCurrent
+	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
+		balToUpdate = gsb.balancerPending
+	}
+	if balToUpdate == nil {
+		// SubConn belonged to a stale lb policy that has not yet fully closed,
+		// or the balancer was already closed.
+		gsb.mu.Unlock()
+		return
+	}
+	if state.ConnectivityState == connectivity.Shutdown {
+		delete(balToUpdate.subconns, sc)
+	}
+	gsb.mu.Unlock()
+	if cb != nil {
+		cb(state)
+	} else {
+		balToUpdate.UpdateSubConnState(sc, state)
+	}
+}
+
+// UpdateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	gsb.updateSubConnState(sc, state, nil)
+}
+
+// Close closes any active child balancers.
+func (gsb *Balancer) Close() {
+	gsb.mu.Lock()
+	gsb.closed = true
+	currentBalancerToClose := gsb.balancerCurrent
+	gsb.balancerCurrent = nil
+	pendingBalancerToClose := gsb.balancerPending
+	gsb.balancerPending = nil
+	gsb.mu.Unlock()
+
+	currentBalancerToClose.Close()
+	pendingBalancerToClose.Close()
+}
+
+// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
+// methods to help cleanup SubConns created by the wrapped balancer.
+//
+// It implements the balancer.ClientConn interface and is passed down in that
+// capacity to the wrapped balancer. It maintains a set of subConns created by
+// the wrapped balancer and calls from the latter to create/update/shutdown
+// SubConns update this set before being forwarded to the parent ClientConn.
+// State updates from the wrapped balancer can result in invocation of the
+// graceful switch logic.
+type balancerWrapper struct {
+	balancer.Balancer
+	gsb *Balancer
+
+	lastState balancer.State
+	subconns  map[balancer.SubConn]bool // subconns created by this balancer
+}
+
+// Close closes the underlying LB policy and shuts down the subconns it
+// created. bw must not be referenced via balancerCurrent or balancerPending in
+// gsb when called. gsb.mu must not be held. Does not panic with a nil
+// receiver.
+func (bw *balancerWrapper) Close() {
+	// bw may be nil if there was no pending balancer before Close is called.
+	if bw == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as Close() is
+	// impossible to be called concurrently with the write in SwitchTo(). The
+	// callsites of Close() for this balancer in Graceful Switch Balancer will
+	// never be called until SwitchTo() returns.
+	bw.Balancer.Close()
+	bw.gsb.mu.Lock()
+	for sc := range bw.subconns {
+		sc.Shutdown()
+	}
+	bw.gsb.mu.Unlock()
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+	// Hold the mutex for this entire call to ensure it cannot occur
+	// concurrently with other updateState() calls. This causes updates to
+	// lastState and calls to cc.UpdateState to happen atomically.
+	bw.gsb.mu.Lock()
+	defer bw.gsb.mu.Unlock()
+	bw.lastState = state
+
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		return
+	}
+
+	if bw == bw.gsb.balancerCurrent {
+		// In the case that the current balancer exits READY, and there is a pending
+		// balancer, you can forward the pending balancer's cached State up to
+		// ClientConn and swap the pending into the current. This is because there
+		// is no reason to gracefully switch from and keep using the old policy as
+		// the ClientConn is not connected to any backends.
+		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
+			bw.gsb.swap()
+			return
+		}
+		// Even if there is a pending balancer waiting to be gracefully switched to,
+		// continue to forward current balancer updates to the Client Conn. Ignoring
+		// state + picker from the current would cause undefined behavior/cause the
+		// system to behave incorrectly from the current LB policies perspective.
+		// Also, the current LB is still being used by grpc to choose SubConns per
+		// RPC, and thus should use the most updated form of the current balancer.
+		bw.gsb.cc.UpdateState(state)
+		return
+	}
+	// This method is now dealing with a state update from the pending balancer.
+	// If the current balancer is currently in a state other than READY, the new
+	// policy can be swapped into place immediately. This is because there is no
+	// reason to gracefully switch from and keep using the old policy as the
+	// ClientConn is not connected to any backends.
+	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
+		bw.gsb.swap()
+	}
+}
+
+func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.gsb.mu.Unlock()
+
+	var sc balancer.SubConn
+	oldListener := opts.StateListener
+	opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) }
+	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
+		sc.Shutdown()
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.subconns[sc] = true
+	bw.gsb.mu.Unlock()
+	return sc, nil
+}
+
+func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
+	// Ignore ResolveNow requests from anything other than the most recent
+	// balancer, because older balancers were already removed from the config.
+	if bw != bw.gsb.latestBalancer() {
+		return
+	}
+	bw.gsb.cc.ResolveNow(opts)
+}
+
+func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// Note: existing third party balancers may call this, so it must remain
+	// until RemoveSubConn is fully removed.
+	sc.Shutdown()
+}
+
+func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return
+	}
+	bw.gsb.mu.Unlock()
+	bw.gsb.cc.UpdateAddresses(sc, addrs)
+}
+
+func (bw *balancerWrapper) Target() string {
+	return bw.gsb.cc.Target()
+}
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
new file mode 100644
index 0000000000..94a08d6875
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package balancerload defines APIs to parse server loads in trailers. The
+// parsed loads are sent to balancers in DoneInfo.
+package balancerload
+
+import (
+	"google.golang.org/grpc/metadata"
+)
+
+// Parser converts loads from metadata into a concrete type.
+type Parser interface {
+	// Parse parses loads from metadata.
+	Parse(md metadata.MD) any
+}
+
+var parser Parser
+
+// SetParser sets the load parser.
+//
+// Not mutex-protected, should be called before any gRPC functions.
+func SetParser(lr Parser) {
+	parser = lr
+}
+
+// Parse calls parser.Read().
+func Parse(md metadata.MD) any {
+	if parser == nil {
+		return nil
+	}
+	return parser.Parse(md)
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
new file mode 100644
index 0000000000..755fdebc1b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -0,0 +1,192 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package binarylog implements binary logging as defined in
+// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
+package binarylog
+
+import (
+	"fmt"
+	"os"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+)
+
+var grpclogLogger = grpclog.Component("binarylog")
+
+// Logger specifies MethodLoggers for method names with a Log call that
+// takes a context.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+type Logger interface {
+	GetMethodLogger(methodName string) MethodLogger
+}
+
+// binLogger is the global binary logger for the binary. One of these should be
One of these should be
+// built at init time from the configuration (environment variable or flags).
+//
+// It is used to get a MethodLogger for each individual method.
+var binLogger Logger
+
+// SetLogger sets the binary logger.
+//
+// Only call this at init time.
+func SetLogger(l Logger) {
+ binLogger = l
+}
+
+// GetLogger gets the binary logger.
+//
+// Only call this at init time.
+func GetLogger() Logger {
+ return binLogger
+}
+
+// GetMethodLogger returns the MethodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each MethodLogger returned by this method is a new instance, so that
+// sequence ids can be generated within the call.
+func GetMethodLogger(methodName string) MethodLogger {
+ if binLogger == nil {
+ return nil
+ }
+ return binLogger.GetMethodLogger(methodName)
+}
+
+func init() {
+ const envStr = "GRPC_BINARY_LOG_FILTER"
+ configStr := os.Getenv(envStr)
+ binLogger = NewLoggerFromConfigString(configStr)
+}
+
+// MethodLoggerConfig contains the settings for the logging behavior of a
+// method logger. Currently, it contains the max length of header and message.
+type MethodLoggerConfig struct {
+ // Max length of header and message.
+ Header, Message uint64
+}
+
+// LoggerConfig contains the config for loggers to create method loggers.
+type LoggerConfig struct {
+ All *MethodLoggerConfig
+ Services map[string]*MethodLoggerConfig
+ Methods map[string]*MethodLoggerConfig
+
+ Blacklist map[string]struct{}
+}
+
+type logger struct {
+ config LoggerConfig
+}
+
+// NewLoggerFromConfig builds a logger with the given LoggerConfig.
+func NewLoggerFromConfig(config LoggerConfig) Logger {
+ return &logger{config: config}
+}
+
+// newEmptyLogger creates an empty logger. The map fields need to be filled in
+// using the set* functions.
+func newEmptyLogger() *logger {
+ return &logger{}
+}
+
+// Set method logger for "*".
+//
+// An error is returned if a global rule was already set.
+func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
+ if l.config.All != nil {
+ return fmt.Errorf("conflicting global rules found")
+ }
+ l.config.All = ml
+ return nil
+}
+
+// Set method logger for "service/*".
+//
+// An error is returned if a rule for the same service already exists.
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
+ if _, ok := l.config.Services[service]; ok {
+ return fmt.Errorf("conflicting service rules for service %v found", service)
+ }
+ if l.config.Services == nil {
+ l.config.Services = make(map[string]*MethodLoggerConfig)
+ }
+ l.config.Services[service] = ml
+ return nil
+}
+
+// Set method logger for "service/method".
+//
+// An error is returned if the method is already blacklisted or already has a
+// rule.
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
+ if _, ok := l.config.Blacklist[method]; ok {
+ return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+ }
+ if _, ok := l.config.Methods[method]; ok {
+ return fmt.Errorf("conflicting method rules for method %v found", method)
+ }
+ if l.config.Methods == nil {
+ l.config.Methods = make(map[string]*MethodLoggerConfig)
+ }
+ l.config.Methods[method] = ml
+ return nil
+}
+
+// Set blacklist method for "-service/method".
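+//
+// For example, the config entry "-Foo/Bar" results in setBlacklist("Foo/Bar").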
+func (l *logger) setBlacklist(method string) error {
+ if _, ok := l.config.Blacklist[method]; ok {
+ return fmt.Errorf("conflicting blacklist rules for method %v found", method)
+ }
+ if _, ok := l.config.Methods[method]; ok {
+ return fmt.Errorf("conflicting method rules for method %v found", method)
+ }
+ if l.config.Blacklist == nil {
+ l.config.Blacklist = make(map[string]struct{})
+ }
+ l.config.Blacklist[method] = struct{}{}
+ return nil
+}
+
+// GetMethodLogger returns the MethodLogger for the given methodName.
+//
+// methodName should be in the format of "/service/method".
+//
+// Each MethodLogger returned by this method is a new instance, so that
+// sequence ids can be generated within the call.
+func (l *logger) GetMethodLogger(methodName string) MethodLogger {
+ s, m, err := grpcutil.ParseMethod(methodName)
+ if err != nil {
+ grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+ return nil
+ }
+ if ml, ok := l.config.Methods[s+"/"+m]; ok {
+ return NewTruncatingMethodLogger(ml.Header, ml.Message)
+ }
+ if _, ok := l.config.Blacklist[s+"/"+m]; ok {
+ return nil
+ }
+ if ml, ok := l.config.Services[s]; ok {
+ return NewTruncatingMethodLogger(ml.Header, ml.Message)
+ }
+ if l.config.All == nil {
+ return nil
+ }
+ return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
new file mode 100644
index 0000000000..1ee00a39ac
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// This file contains variables and functions that are exported for testing
+// only.
+//
+// Ideally these would live in a *_test.go file in the binarylog package, but
+// that doesn't work with staticcheck under Go modules. The error was:
+// "MdToMetadataProto not declared by package binarylog". This could be caused
+// by the way staticcheck looks for files for a certain package, which doesn't
+// support *_test.go files.
+//
+// Move them to binary_test.go when staticcheck is fixed.
+
+package binarylog
+
+var (
+ // AllLogger is a logger that logs all headers/messages for all RPCs. It's
+ // for testing only.
+ AllLogger = NewLoggerFromConfigString("*")
+ // MdToMetadataProto converts metadata to a binary logging proto message.
+ // It's for testing only.
+ MdToMetadataProto = mdToMetadataProto
+ // AddrToProto converts an address to a binary logging proto message. It's
+ // for testing only.
+ AddrToProto = addrToProto
+)
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
new file mode 100644
index 0000000000..f9e80e27ab
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -0,0 +1,208 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// NewLoggerFromConfigString reads the string and builds a logger. It can be used
+// to build a new logger and assign it to binarylog.Logger.
+//
+// Example filter config strings:
+// - "" Nothing will be logged
+// - "*" All headers and messages will be fully logged.
+// - "*{h}" Only headers will be logged.
+// - "*{m:256}" Only the first 256 bytes of each message will be logged.
+// - "Foo/*" Logs every method in service Foo
+// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
+// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
+// /Foo/Bar, logs all headers and messages in every other method in service
+// Foo.
+//
+// A method or service may be configured at most once; conflicting rules cause
+// the whole config string to be rejected.
+func NewLoggerFromConfigString(s string) Logger {
+ if s == "" {
+ return nil
+ }
+ l := newEmptyLogger()
+ methods := strings.Split(s, ",")
+ for _, method := range methods {
+ if err := l.fillMethodLoggerWithConfigString(method); err != nil {
+ grpclogLogger.Warningf("failed to parse binary log config: %v", err)
+ return nil
+ }
+ }
+ return l
+}
+
+// fillMethodLoggerWithConfigString parses config, creates a
+// TruncatingMethodLogger and adds it to the right map in the logger.
+func (l *logger) fillMethodLoggerWithConfigString(config string) error {
+ // "" is invalid.
+ if config == "" {
+ return errors.New("empty string is not a valid method binary logging config")
+ }
+
+ // "-service/method", blacklist, no * or {} allowed.
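+ // e.g. "-Foo/Bar" is accepted, while "-Foo/*" and "-Foo/Bar{h}" are
+ // rejected below.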
+ if config[0] == '-' {
+ s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
+ if err != nil {
+ return fmt.Errorf("invalid config: %q, %v", config, err)
+ }
+ if m == "*" {
+ return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
+ }
+ if suffix != "" {
+ return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
+ }
+ if err := l.setBlacklist(s + "/" + m); err != nil {
+ return fmt.Errorf("invalid config: %v", err)
+ }
+ return nil
+ }
+
+ // "*{h:256;m:256}"
+ if config[0] == '*' {
+ hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
+ if err != nil {
+ return fmt.Errorf("invalid config: %q, %v", config, err)
+ }
+ if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
+ return fmt.Errorf("invalid config: %v", err)
+ }
+ return nil
+ }
+
+ s, m, suffix, err := parseMethodConfigAndSuffix(config)
+ if err != nil {
+ return fmt.Errorf("invalid config: %q, %v", config, err)
+ }
+ hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
+ if err != nil {
+ return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
+ }
+ if m == "*" {
+ if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
+ return fmt.Errorf("invalid config: %v", err)
+ }
+ } else {
+ if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
+ return fmt.Errorf("invalid config: %v", err)
+ }
+ }
+ return nil
+}
+
+const (
+ // TODO: this const is only used by env_config now. But it could be useful
+ // for other configs. Move to binarylog.go if necessary.
+ maxUInt = ^uint64(0)
+
+ // For "p.s/m" plus any suffix. The suffix will be parsed again. See test
+ // for expected output.
+ longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
+
+ // For the suffix from above, "{h:123;m:123}". See test for expected output.
+ optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123".
+ headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$`
+ messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$`
+ headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
+)
+
+var (
+ longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr)
+ headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr)
+ messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr)
+ headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
+)
+
+// Turn "service/method{h;m}" into "service", "method", "{h;m}".
+func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
+ // Regexp result:
+ //
+ // in: "p.s/m{h:123;m:123}",
+ // out: []string{"p.s/m{h:123;m:123}", "p.s", "m", "{h:123;m:123}"},
+ match := longMethodConfigRegexp.FindStringSubmatch(c)
+ if match == nil {
+ return "", "", "", fmt.Errorf("%q contains invalid substring", c)
+ }
+ service = match[1]
+ method = match[2]
+ suffix = match[3]
+ return
+}
+
+// Turn "{h:123;m:345}" into 123, 345.
+//
+// Return maxUInt if length is unspecified.
+func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) {
+ if c == "" {
+ return maxUInt, maxUInt, nil
+ }
+ // Header config only.
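+ // e.g. "{h:512}" yields (512, 0), and a bare "{h}" yields (maxUInt, 0).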
+ if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
+ if s := match[1]; s != "" {
+ hdrLenStr, err = strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+ }
+ return hdrLenStr, 0, nil
+ }
+ return maxUInt, 0, nil
+ }
+
+ // Message config only.
+ if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
+ if s := match[1]; s != "" {
+ msgLenStr, err = strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+ }
+ return 0, msgLenStr, nil
+ }
+ return 0, maxUInt, nil
+ }
+
+ // Header and message config both.
+ if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
+ // Both hdr and msg are specified, but one or two of them might be empty.
+ hdrLenStr = maxUInt
+ msgLenStr = maxUInt
+ if s := match[1]; s != "" {
+ hdrLenStr, err = strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+ }
+ }
+ if s := match[2]; s != "" {
+ msgLenStr, err = strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
+ }
+ }
+ return hdrLenStr, msgLenStr, nil
+ }
+ return 0, 0, fmt.Errorf("%q contains invalid substring", c)
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
new file mode 100644
index 0000000000..0f31274a3c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -0,0 +1,445 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+ "context"
+ "net"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/ptypes"
+ binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+type callIDGenerator struct {
+ id uint64
+}
+
+func (g *callIDGenerator) next() uint64 {
+ id := atomic.AddUint64(&g.id, 1)
+ return id
+}
+
+// reset is for testing only, and doesn't need to be thread-safe.
+func (g *callIDGenerator) reset() {
+ g.id = 0
+}
+
+var idGen callIDGenerator
+
+// MethodLogger is the sub-logger for each method.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+type MethodLogger interface {
+ Log(context.Context, LogEntryConfig)
+}
+
+// TruncatingMethodLogger is a method logger that truncates headers and messages
+// based on configured fields.
+type TruncatingMethodLogger struct {
+ headerMaxLen, messageMaxLen uint64
+
+ callID uint64
+ idWithinCallGen *callIDGenerator
+
+ sink Sink // TODO(blog): make this pluggable.
+}
+
+// NewTruncatingMethodLogger returns a new truncating method logger.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
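+//
+// For example, NewTruncatingMethodLogger(256, 1024) caps logged headers at a
+// total of 256 bytes and each logged message at 1024 bytes.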
+func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
+ return &TruncatingMethodLogger{
+ headerMaxLen: h,
+ messageMaxLen: m,
+
+ callID: idGen.next(),
+ idWithinCallGen: &callIDGenerator{},
+
+ sink: DefaultSink, // TODO(blog): make it pluggable.
+ }
+}
+
+// Build is an internal-only method for building the proto message out of the
+// input event. It is exported to enable other libraries to reuse as much logic
+// in TruncatingMethodLogger as possible.
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
+ m := c.toProto()
+ timestamp, _ := ptypes.TimestampProto(time.Now())
+ m.Timestamp = timestamp
+ m.CallId = ml.callID
+ m.SequenceIdWithinCall = ml.idWithinCallGen.next()
+
+ switch pay := m.Payload.(type) {
+ case *binlogpb.GrpcLogEntry_ClientHeader:
+ m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
+ case *binlogpb.GrpcLogEntry_ServerHeader:
+ m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
+ case *binlogpb.GrpcLogEntry_Message:
+ m.PayloadTruncated = ml.truncateMessage(pay.Message)
+ }
+ return m
+}
+
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
+ ml.sink.Write(ml.Build(c))
+}
+
+func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
+ if ml.headerMaxLen == maxUInt {
+ return false
+ }
+ var (
+ bytesLimit = ml.headerMaxLen
+ index int
+ )
+ // At the end of the loop, index will be the first entry where the total
+ // size is greater than the limit:
+ //
+ // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
+ for ; index < len(mdPb.Entry); index++ {
+ entry := mdPb.Entry[index]
+ if entry.Key == "grpc-trace-bin" {
+ // "grpc-trace-bin" is a special key. It's kept in the log entry,
+ // but not counted towards the size limit.
+ continue
+ }
+ currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
+ if currentEntryLen > bytesLimit {
+ break
+ }
+ bytesLimit -= currentEntryLen
+ }
+ truncated = index < len(mdPb.Entry)
+ mdPb.Entry = mdPb.Entry[:index]
+ return truncated
+}
+
+func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
+ if ml.messageMaxLen == maxUInt {
+ return false
+ }
+ if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
+ return false
+ }
+ msgPb.Data = msgPb.Data[:ml.messageMaxLen]
+ return true
+}
+
+// LogEntryConfig represents the configuration for a binary log entry.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+type LogEntryConfig interface {
+ toProto() *binlogpb.GrpcLogEntry
+}
+
+// ClientHeader configures the binary log entry to be a ClientHeader entry.
+type ClientHeader struct {
+ OnClientSide bool
+ Header metadata.MD
+ MethodName string
+ Authority string
+ Timeout time.Duration
+ // PeerAddr is required only when it's on the server side.
+ PeerAddr net.Addr
+}
+
+func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
+ // This function doesn't need to set all the fields (e.g. seq ID); Build
+ // fills them in when necessary.
+ clientHeader := &binlogpb.ClientHeader{
+ Metadata: mdToMetadataProto(c.Header),
+ MethodName: c.MethodName,
+ Authority: c.Authority,
+ }
+ if c.Timeout > 0 {
+ clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
+ }
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
+ Payload: &binlogpb.GrpcLogEntry_ClientHeader{
+ ClientHeader: clientHeader,
+ },
+ }
+ if c.OnClientSide {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+ } else {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+ }
+ if c.PeerAddr != nil {
+ ret.Peer = addrToProto(c.PeerAddr)
+ }
+ return ret
+}
+
+// ServerHeader configures the binary log entry to be a ServerHeader entry.
+type ServerHeader struct {
+ OnClientSide bool
+ Header metadata.MD
+ // PeerAddr is required only when it's on the client side.
+ PeerAddr net.Addr
+}
+
+func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
+ Payload: &binlogpb.GrpcLogEntry_ServerHeader{
+ ServerHeader: &binlogpb.ServerHeader{
+ Metadata: mdToMetadataProto(c.Header),
+ },
+ },
+ }
+ if c.OnClientSide {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+ } else {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+ }
+ if c.PeerAddr != nil {
+ ret.Peer = addrToProto(c.PeerAddr)
+ }
+ return ret
+}
+
+// ClientMessage configures the binary log entry to be a ClientMessage entry.
+type ClientMessage struct {
+ OnClientSide bool
+ // Message can be a proto.Message or []byte. Other message formats are not
+ // supported.
+ Message any
+}
+
+func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
+ var (
+ data []byte
+ err error
+ )
+ if m, ok := c.Message.(proto.Message); ok {
+ data, err = proto.Marshal(m)
+ if err != nil {
+ grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
+ }
+ } else if b, ok := c.Message.([]byte); ok {
+ data = b
+ } else {
+ grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
+ }
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
+ Payload: &binlogpb.GrpcLogEntry_Message{
+ Message: &binlogpb.Message{
+ Length: uint32(len(data)),
+ Data: data,
+ },
+ },
+ }
+ if c.OnClientSide {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+ } else {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+ }
+ return ret
+}
+
+// ServerMessage configures the binary log entry to be a ServerMessage entry.
+type ServerMessage struct {
+ OnClientSide bool
+ // Message can be a proto.Message or []byte. Other message formats are not
+ // supported.
+ Message any
+}
+
+func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
+ var (
+ data []byte
+ err error
+ )
+ if m, ok := c.Message.(proto.Message); ok {
+ data, err = proto.Marshal(m)
+ if err != nil {
+ grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
+ }
+ } else if b, ok := c.Message.([]byte); ok {
+ data = b
+ } else {
+ grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
+ }
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
+ Payload: &binlogpb.GrpcLogEntry_Message{
+ Message: &binlogpb.Message{
+ Length: uint32(len(data)),
+ Data: data,
+ },
+ },
+ }
+ if c.OnClientSide {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+ } else {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+ }
+ return ret
+}
+
+// ClientHalfClose configures the binary log entry to be a ClientHalfClose
+// entry.
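+// It carries no payload; only the logger side (client or server) is recorded.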
+type ClientHalfClose struct {
+ OnClientSide bool
+}
+
+func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
+ Payload: nil, // No payload here.
+ }
+ if c.OnClientSide {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+ } else {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+ }
+ return ret
+}
+
+// ServerTrailer configures the binary log entry to be a ServerTrailer entry.
+type ServerTrailer struct {
+ OnClientSide bool
+ Trailer metadata.MD
+ // Err is the status error.
+ Err error
+ // PeerAddr is required only when it's on the client side and the RPC is
+ // trailer-only.
+ PeerAddr net.Addr
+}
+
+func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
+ st, ok := status.FromError(c.Err)
+ if !ok {
+ grpclogLogger.Info("binarylogging: error in trailer is not a status error")
+ }
+ var (
+ detailsBytes []byte
+ err error
+ )
+ stProto := st.Proto()
+ if stProto != nil && len(stProto.Details) != 0 {
+ detailsBytes, err = proto.Marshal(stProto)
+ if err != nil {
+ grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
+ }
+ }
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
+ Payload: &binlogpb.GrpcLogEntry_Trailer{
+ Trailer: &binlogpb.Trailer{
+ Metadata: mdToMetadataProto(c.Trailer),
+ StatusCode: uint32(st.Code()),
+ StatusMessage: st.Message(),
+ StatusDetails: detailsBytes,
+ },
+ },
+ }
+ if c.OnClientSide {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+ } else {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+ }
+ if c.PeerAddr != nil {
+ ret.Peer = addrToProto(c.PeerAddr)
+ }
+ return ret
+}
+
+// Cancel configures the binary log entry to be a Cancel entry.
+type Cancel struct {
+ OnClientSide bool
+}
+
+func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
+ Payload: nil,
+ }
+ if c.OnClientSide {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
+ } else {
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
+ }
+ return ret
+}
+
+// metadataKeyOmit returns whether the metadata entry with this key should be
+// omitted.
+func metadataKeyOmit(key string) bool {
+ switch key {
+ case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
+ return true
+ case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
+ return false
+ }
+ return strings.HasPrefix(key, "grpc-")
+}
+
+func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
+ ret := &binlogpb.Metadata{}
+ for k, vv := range md {
+ if metadataKeyOmit(k) {
+ continue
+ }
+ for _, v := range vv {
+ ret.Entry = append(ret.Entry,
+ &binlogpb.MetadataEntry{
+ Key: k,
+ Value: []byte(v),
+ },
+ )
+ }
+ }
+ return ret
+}
+
+func addrToProto(addr net.Addr) *binlogpb.Address {
+ ret := &binlogpb.Address{}
+ switch a := addr.(type) {
+ case *net.TCPAddr:
+ if a.IP.To4() != nil {
+ ret.Type = binlogpb.Address_TYPE_IPV4
+ } else if a.IP.To16() != nil {
+ ret.Type = binlogpb.Address_TYPE_IPV6
+ } else {
+ ret.Type = binlogpb.Address_TYPE_UNKNOWN
+ // Do not set address and port fields.
+ break
+ }
+ ret.Address = a.IP.String()
+ ret.IpPort = uint32(a.Port)
+ case *net.UnixAddr:
+ ret.Type = binlogpb.Address_TYPE_UNIX
+ ret.Address = a.String()
+ default:
+ ret.Type = binlogpb.Address_TYPE_UNKNOWN
+ }
+ return ret
+}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
new file mode 100644
index 0000000000..264de387c2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -0,0 +1,170 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package binarylog
+
+import (
+ "bufio"
+ "encoding/binary"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+)
+
+var (
+ // DefaultSink is the sink where the logs will be written. It's exported
+ // for the binarylog package to update.
+ DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+)
+
+// Sink writes a log entry into the binary log sink.
+//
+// Sink is a copy of the exported binarylog.Sink, to avoid a circular
+// dependency.
+type Sink interface {
+ // Write will be called to write the log entry into the sink.
+ //
+ // It should be thread-safe so it can be called in parallel.
+ Write(*binlogpb.GrpcLogEntry) error
+ // Close will be called when the Sink is replaced by a new Sink.
+ Close() error
+}
+
+type noopSink struct{}
+
+func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
+func (ns *noopSink) Close() error { return nil }
+
+// newWriterSink creates a binary log sink with the given writer.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4 byte big endian unsigned integer as the length.
+//
+// No buffering is done, and Close() doesn't try to close the writer.
+func newWriterSink(w io.Writer) Sink {
+ return &writerSink{out: w}
+}
+
+type writerSink struct {
+ out io.Writer
+}
+
+func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
+ b, err := proto.Marshal(e)
+ if err != nil {
+ grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
+ return err
+ }
+ hdr := make([]byte, 4)
+ binary.BigEndian.PutUint32(hdr, uint32(len(b)))
+ if _, err := ws.out.Write(hdr); err != nil {
+ return err
+ }
+ if _, err := ws.out.Write(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (ws *writerSink) Close() error { return nil }
+
+type bufferedSink struct {
+ mu sync.Mutex
+ closer io.Closer
+ out Sink // out is built on buf.
+ buf *bufio.Writer // buf is kept for flush.
+ flusherStarted bool
+
+ writeTicker *time.Ticker
+ done chan struct{}
+}
+
+func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if !fs.flusherStarted {
+ // Start the write loop when Write is called.
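+ // Starting lazily avoids spawning a flusher goroutine for sinks that
+ // never receive a log entry.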
+ fs.startFlushGoroutine() + fs.flusherStarted = true + } + if err := fs.out.Write(e); err != nil { + return err + } + return nil +} + +const ( + bufFlushDuration = 60 * time.Second +) + +func (fs *bufferedSink) startFlushGoroutine() { + fs.writeTicker = time.NewTicker(bufFlushDuration) + go func() { + for { + select { + case <-fs.done: + return + case <-fs.writeTicker.C: + } + fs.mu.Lock() + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + fs.mu.Unlock() + } + }() +} + +func (fs *bufferedSink) Close() error { + fs.mu.Lock() + defer fs.mu.Unlock() + if fs.writeTicker != nil { + fs.writeTicker.Stop() + } + close(fs.done) + if err := fs.buf.Flush(); err != nil { + grpclogLogger.Warningf("failed to flush to Sink: %v", err) + } + if err := fs.closer.Close(); err != nil { + grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err) + } + if err := fs.out.Close(); err != nil { + grpclogLogger.Warningf("failed to close the Sink: %v", err) + } + return nil +} + +// NewBufferedSink creates a binary log sink with the given WriteCloser. +// +// Write() marshals the proto message and writes it to the given writer. Each +// message is prefixed with a 4 byte big endian unsigned integer as the length. +// +// Content is kept in a buffer, and is flushed every 60 seconds. +// +// Close closes the WriteCloser. +func NewBufferedSink(o io.WriteCloser) Sink { + bufW := bufio.NewWriter(o) + return &bufferedSink{ + closer: o, + out: newWriterSink(bufW), + buf: bufW, + done: make(chan struct{}), + } +} diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go new file mode 100644 index 0000000000..4399c3df49 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -0,0 +1,105 @@ +/* + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package buffer provides an implementation of an unbounded buffer. +package buffer + +import "sync" + +// Unbounded is an implementation of an unbounded buffer which does not use +// extra goroutines. This is typically used for passing updates from one entity +// to another within gRPC. +// +// All methods on this type are thread-safe and don't block on anything except +// the underlying mutex used for synchronization. +// +// Unbounded supports values of any type to be stored in it by using a channel +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See +// internal/transport/transport.go for an example of this. +type Unbounded struct { + c chan any + closed bool + mu sync.Mutex + backlog []any +} + +// NewUnbounded returns a new instance of Unbounded. 
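+//
+// A minimal consumer sketch for b := NewUnbounded() (process is a
+// hypothetical handler): read a value from Get(), then call Load() to
+// release the next buffered item:
+//
+// for v := range b.Get() {
+// b.Load()
+// process(v)
+// }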
+func NewUnbounded() *Unbounded {
+ return &Unbounded{c: make(chan any, 1)}
+}
+
+// Put adds t to the unbounded buffer.
+func (b *Unbounded) Put(t any) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closed {
+ return
+ }
+ if len(b.backlog) == 0 {
+ select {
+ case b.c <- t:
+ return
+ default:
+ }
+ }
+ b.backlog = append(b.backlog, t)
+}
+
+// Load sends the earliest buffered data, if any, onto the read channel
+// returned by Get(). Users are expected to call this every time they read a
+// value from the read channel.
+func (b *Unbounded) Load() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closed {
+ return
+ }
+ if len(b.backlog) > 0 {
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog[0] = nil
+ b.backlog = b.backlog[1:]
+ default:
+ }
+ }
+}
+
+// Get returns a read channel on which values added to the buffer, via Put(),
+// are sent.
+//
+// Upon reading a value from this channel, users are expected to call Load() to
+// send the next buffered value onto the channel if there is any.
+//
+// If the unbounded buffer is closed, the read channel returned by this method
+// is closed.
+func (b *Unbounded) Get() <-chan any {
+ return b.c
+}
+
+// Close closes the unbounded buffer.
+func (b *Unbounded) Close() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closed {
+ return
+ }
+ b.closed = true
+ close(b.c)
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
new file mode 100644
index 0000000000..5395e77529
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -0,0 +1,756 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz defines APIs for enabling channelz service, entry
+// registration/deletion, and accessing channelz data. It also defines channelz
+// metric struct formats.
+//
+// All APIs in this package are experimental.
+package channelz
+
+import (
+ "errors"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/grpc/grpclog"
+)
+
+const (
+ defaultMaxTraceEntry int32 = 30
+)
+
+var (
+ // IDGen is the global channelz entity ID generator. It should not be used
+ // outside this package except by tests.
+ IDGen IDGenerator
+
+ db dbWrapper
+ // EntryPerPage defines the number of channelz entries to be shown on a web page.
+ EntryPerPage = int64(50)
+ curState int32
+ maxTraceEntry = defaultMaxTraceEntry
+)
+
+// TurnOn turns on channelz data collection.
+func TurnOn() {
+ if !IsOn() {
+ db.set(newChannelMap())
+ IDGen.Reset()
+ atomic.StoreInt32(&curState, 1)
+ }
+}
+
+// IsOn returns whether channelz data collection is on.
+func IsOn() bool {
+ return atomic.LoadInt32(&curState) == 1
+}
+
+// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e.
+// channel/subchannel). Setting it to 0 disables channel tracing.
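+//
+// For example, SetMaxTraceEntry(100) keeps at most 100 trace events per
+// entity.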
+func SetMaxTraceEntry(i int32) {
+ atomic.StoreInt32(&maxTraceEntry, i)
+}
+
+// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per
+// entity to the default.
+func ResetMaxTraceEntryToDefault() {
+ atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
+}
+
+func getMaxTraceEntry() int {
+ i := atomic.LoadInt32(&maxTraceEntry)
+ return int(i)
+}
+
+// dbWrapper wraps a reference to the internal channelz data storage, and
+// provides synchronized functionality to set and get the reference.
+type dbWrapper struct {
+ mu sync.RWMutex
+ DB *channelMap
+}
+
+func (d *dbWrapper) set(db *channelMap) {
+ d.mu.Lock()
+ d.DB = db
+ d.mu.Unlock()
+}
+
+func (d *dbWrapper) get() *channelMap {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+ return d.DB
+}
+
+// GetTopChannels returns a slice of top channels' ChannelMetrics, along with a
+// boolean indicating whether there are more top channels to be queried for.
+//
+// The arg id specifies that only top channels with an id at or above it will
+// be included in the result. The returned slice is up to a length of the arg
+// maxResults or EntryPerPage if maxResults is zero, and is sorted in ascending
+// id order.
+func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
+ return db.get().GetTopChannels(id, maxResults)
+}
+
+// GetServers returns a slice of servers' ServerMetrics, along with a boolean
+// indicating whether there are more servers to be queried for.
+//
+// The arg id specifies that only servers with an id at or above it will be
+// included in the result. The returned slice is up to a length of the arg
+// maxResults or EntryPerPage if maxResults is zero, and is sorted in ascending
+// id order.
+func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
+ return db.get().GetServers(id, maxResults)
+}
+
+// GetServerSockets returns a slice of SocketMetrics for the normal sockets of
+// the server identified by id, along with a boolean indicating whether there
+// are more sockets to be queried for.
+//
+// The arg startID specifies that only sockets with an id at or above it will
+// be included in the result. The returned slice is up to a length of the arg
+// maxResults or EntryPerPage if maxResults is zero, and is sorted in ascending
+// id order.
+func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
+ return db.get().GetServerSockets(id, startID, maxResults)
+}
+
+// GetChannel returns the ChannelMetric for the channel (identified by id).
+func GetChannel(id int64) *ChannelMetric {
+ return db.get().GetChannel(id)
+}
+
+// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
+func GetSubChannel(id int64) *SubChannelMetric {
+ return db.get().GetSubChannel(id)
+}
+
+// GetSocket returns the SocketInternalMetric for the socket (identified by id).
+func GetSocket(id int64) *SocketMetric {
+ return db.get().GetSocket(id)
+}
+
+// GetServer returns the ServerMetric for the server (identified by id).
+func GetServer(id int64) *ServerMetric {
+ return db.get().GetServer(id)
+}
+
+// RegisterChannel registers the given channel c in the channelz database with
+// ref as its reference name, and adds it to the child list of its parent
+// (identified by pid). pid == nil means no parent.
+//
+// Returns a unique channelz identifier assigned to this channel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
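+//
+// For example, a top-level channel registers itself with
+// RegisterChannel(c, nil, "my-channel") and later passes the returned
+// *Identifier as pid when registering its subchannels.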
+func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier {
+ id := IDGen.genID()
+ var parent int64
+ isTopChannel := true
+ if pid != nil {
+ isTopChannel = false
+ parent = pid.Int()
+ }
+
+ if !IsOn() {
+ return newIdentifer(RefChannel, id, pid)
+ }
+
+ cn := &channel{
+ refName: ref,
+ c: c,
+ subChans: make(map[int64]string),
+ nestedChans: make(map[int64]string),
+ id: id,
+ pid: parent,
+ trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+ }
+ db.get().addChannel(id, cn, isTopChannel, parent)
+ return newIdentifer(RefChannel, id, pid)
+}
+
+// RegisterSubChannel registers the given subChannel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid).
+//
+// Returns a unique channelz identifier assigned to this subChannel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) {
+ if pid == nil {
+ return nil, errors.New("a SubChannel's parent id cannot be nil")
+ }
+ id := IDGen.genID()
+ if !IsOn() {
+ return newIdentifer(RefSubChannel, id, pid), nil
+ }
+
+ sc := &subChannel{
+ refName: ref,
+ c: c,
+ sockets: make(map[int64]string),
+ id: id,
+ pid: pid.Int(),
+ trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+ }
+ db.get().addSubChannel(id, sc, pid.Int())
+ return newIdentifer(RefSubChannel, id, pid), nil
+}
+
+// RegisterServer registers the given server s in the channelz database. It
+// returns the unique channelz tracking id assigned to this server.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(s Server, ref string) *Identifier {
+ id := IDGen.genID()
+ if !IsOn() {
+ return newIdentifer(RefServer, id, nil)
+ }
+
+ svr := &server{
+ refName: ref,
+ s: s,
+ sockets: make(map[int64]string),
+ listenSockets: make(map[int64]string),
+ id: id,
+ }
+ db.get().addServer(id, svr)
+ return newIdentifer(RefServer, id, nil)
+}
+
+// RegisterListenSocket registers the given listen socket s in the channelz
+// database with ref as its reference name, and adds it to the child list of
+// its parent (identified by pid). It returns the unique channelz tracking id
+// assigned to this listen socket.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+ if pid == nil {
+ return nil, errors.New("a ListenSocket's parent id cannot be nil")
+ }
+ id := IDGen.genID()
+ if !IsOn() {
+ return newIdentifer(RefListenSocket, id, pid), nil
+ }
+
+ ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+ db.get().addListenSocket(id, ls, pid.Int())
+ return newIdentifer(RefListenSocket, id, pid), nil
+}
+
+// RegisterNormalSocket registers the given normal socket s in the channelz
+// database with ref as its reference name, and adds it to the child list of
+// its parent (identified by pid). It returns the unique channelz tracking id
+// assigned to this normal socket.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) {
+ if pid == nil {
+ return nil, errors.New("a NormalSocket's parent id cannot be nil")
+ }
+ id := IDGen.genID()
+ if !IsOn() {
+ return newIdentifer(RefNormalSocket, id, pid), nil
+ }
+
+ ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()}
+ db.get().addNormalSocket(id, ns, pid.Int())
+ return newIdentifer(RefNormalSocket, id, pid), nil
+}
+
+// RemoveEntry removes the entry with the given unique channelz tracking id
+// from the channelz database.
+//
+// If channelz is not turned ON, this function is a no-op.
+func RemoveEntry(id *Identifier) {
+ if !IsOn() {
+ return
+ }
+ db.get().removeEntry(id.Int())
+}
+
+// TraceEventDesc is what the caller of AddTraceEvent should provide to describe
+// the event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
+type TraceEventDesc struct {
+ Desc string
+ Severity Severity
+ Parent *TraceEventDesc
+}
+
+// AddTraceEvent adds a trace event for the entity with the specified id, using
+// the provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) {
+ // Log only the trace description associated with the bottommost entity.
+ switch desc.Severity {
+ case CtUnknown, CtInfo:
+ l.InfoDepth(depth+1, withParens(id)+desc.Desc)
+ case CtWarning:
+ l.WarningDepth(depth+1, withParens(id)+desc.Desc)
+ case CtError:
+ l.ErrorDepth(depth+1, withParens(id)+desc.Desc)
+ }
+
+ if getMaxTraceEntry() == 0 {
+ return
+ }
+ if IsOn() {
+ db.get().traceEvent(id.Int(), desc)
+ }
+}
+
+// channelMap is the storage data structure for channelz.
+//
+// Methods of channelMap can be divided into two categories with respect to
+// locking:
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is already held.
+// Methods of the second type must always be called from within a method of
+// the first type.
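+// For example, removeEntry acquires the lock (category 1) and then calls
+// findEntry (category 2) while holding it.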
+type channelMap struct {
+ mu sync.RWMutex
+ topLevelChannels map[int64]struct{}
+ servers map[int64]*server
+ channels map[int64]*channel
+ subChannels map[int64]*subChannel
+ listenSockets map[int64]*listenSocket
+ normalSockets map[int64]*normalSocket
+}
+
+func newChannelMap() *channelMap {
+ return &channelMap{
+ topLevelChannels: make(map[int64]struct{}),
+ channels: make(map[int64]*channel),
+ listenSockets: make(map[int64]*listenSocket),
+ normalSockets: make(map[int64]*normalSocket),
+ servers: make(map[int64]*server),
+ subChannels: make(map[int64]*subChannel),
+ }
+}
+
+func (c *channelMap) addServer(id int64, s *server) {
+ c.mu.Lock()
+ s.cm = c
+ c.servers[id] = s
+ c.mu.Unlock()
+}
+
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
+ c.mu.Lock()
+ cn.cm = c
+ cn.trace.cm = c
+ c.channels[id] = cn
+ if isTopChannel {
+ c.topLevelChannels[id] = struct{}{}
+ } else {
+ c.findEntry(pid).addChild(id, cn)
+ }
+ c.mu.Unlock()
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
+ c.mu.Lock()
+ sc.cm = c
+ sc.trace.cm = c
+ c.subChannels[id] = sc
+ c.findEntry(pid).addChild(id, sc)
+ c.mu.Unlock()
+}
+
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
+ c.mu.Lock()
+ ls.cm = c
+ c.listenSockets[id] = ls
+ c.findEntry(pid).addChild(id, ls)
+ c.mu.Unlock()
+}
+
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
+ c.mu.Lock()
+ ns.cm = c
+ c.normalSockets[id] = ns
+ c.findEntry(pid).addChild(id, ns)
+ c.mu.Unlock()
+}
+
+// removeEntry triggers the removal of an entry, which may not delete the entry
+// immediately: deletion waits until all of its children have been deleted and
+// no other entity's channel trace references it. It may lead to a chain of
+// entry deletions. For example, deleting the last socket of a gracefully
+// shutting down server will lead to the server being deleted as well.
+func (c *channelMap) removeEntry(id int64) {
+ c.mu.Lock()
+ c.findEntry(id).triggerDelete()
+ c.mu.Unlock()
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) decrTraceRefCount(id int64) {
+ e := c.findEntry(id)
+ if v, ok := e.(tracedChannel); ok {
+ v.decrTraceRefCount()
+ e.deleteSelfIfReady()
+ }
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+ var v entry
+ var ok bool
+ if v, ok = c.channels[id]; ok {
+ return v
+ }
+ if v, ok = c.subChannels[id]; ok {
+ return v
+ }
+ if v, ok = c.servers[id]; ok {
+ return v
+ }
+ if v, ok = c.listenSockets[id]; ok {
+ return v
+ }
+ if v, ok = c.normalSockets[id]; ok {
+ return v
+ }
+ return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller.
+//
+// deleteEntry simply deletes an entry from the channelMap. Before calling this
+// method, the caller must check that this entry is ready to be deleted, i.e.
+// removeEntry() has been called on it and no children still exist.
+// Conditionals are ordered by the expected frequency of deletion of each entity
+// type, in order to optimize performance.
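+// (Normal sockets are checked first since they are created and deleted most
+// often.)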
+func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { + c.mu.Lock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + c.mu.Unlock() + return + } + childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *channel: + chanType = RefChannel + case *subChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&TraceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } + c.mu.Unlock() +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int64) int64 { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, maxResults)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + t[i].Trace = cn.trace.dumpData() + } + return t, end +} + +func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + c.mu.RLock() + l := int64(len(c.servers)) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, maxResults)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := int64(0) + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() 
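+ // count == 0 means no server at or above the requested id was found, so
+ // this is the end of the list.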
+ if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { + if maxResults <= 0 { + maxResults = EntryPerPage + } + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := int64(len(svrskts)) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + count := int64(0) + var end bool + for i, v := range ids[idx:] { + if count == maxResults { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + s := make([]*SocketMetric, 0, len(sks)) + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when + // holding the lock to prevent potential data race. + chanCopy := cn.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + cm.Trace = cn.trace.dumpData() + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when + // holding the lock to prevent potential data race. + chanCopy := sc.c + c.mu.RUnlock() + cm.ChannelData = chanCopy.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + cm.Trace = sc.trace.dumpData() + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +func (c *channelMap) GetServer(id int64) *ServerMetric { + sm := &ServerMetric{} + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + c.mu.RUnlock() + return nil + } + sm.ListenSockets = copyMap(svr.listenSockets) + c.mu.RUnlock() + sm.ID = svr.id + sm.RefName = svr.refName + sm.ServerData = svr.s.ChannelzMetric() + return sm +} + +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { + id int64 +} + +// Reset resets the generated ID back to zero. 
Should only be used at +// initialization or by tests sensitive to the ID number. +func (i *IDGenerator) Reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *IDGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 0000000000..c9a27acd37 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 0000000000..f89e6f77bb --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("channelz")
+
+func withParens(id *Identifier) string {
+	return "[" + id.String() + "] "
+}
+
+// Info logs and adds a trace event if channelz is on.
+func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtInfo,
+	})
+}
+
+// Infof logs and adds a trace event if channelz is on.
+func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtInfo,
+	})
+}
+
+// Warning logs and adds a trace event if channelz is on.
+func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtWarning,
+	})
+}
+
+// Warningf logs and adds a trace event if channelz is on.
+func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtWarning,
+	})
+}
+
+// Error logs and adds a trace event if channelz is on.
+func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtError,
+	})
+}
+
+// Errorf logs and adds a trace event if channelz is on.
+func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) {
+	AddTraceEvent(l, id, 1, &TraceEventDesc{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtError,
+	})
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
new file mode 100644
index 0000000000..1d4020f537
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/types.go
@@ -0,0 +1,727 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/credentials"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However, if the child
+	// list is not empty, then deletion from the database is on hold until the last
+	// child is deleted from the database.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before, and whether the child
+	// list is now empty. If both conditions are met, then delete self from the database.
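The deferred-deletion rule that triggerDelete and deleteSelfIfReady describe is easier to see in isolation. The toy model below keeps only that rule; it drops the real implementation's locking, trace reference counts, and dummy entries:

package main

import "fmt"

// node is a toy stand-in for a channelz entry: deletion is requested with
// triggerDelete, but only takes effect once the child list drains.
type node struct {
	id          int64
	closeCalled bool
	children    map[int64]*node
	parent      *node
	db          map[int64]*node
}

func (n *node) triggerDelete() {
	n.closeCalled = true
	n.deleteSelfIfReady()
}

func (n *node) deleteChild(id int64) {
	delete(n.children, id)
	n.deleteSelfIfReady()
}

func (n *node) deleteSelfIfReady() {
	if !n.closeCalled || len(n.children) != 0 {
		return // deletion is on hold until the last child is gone
	}
	delete(n.db, n.id)
	if n.parent != nil {
		n.parent.deleteChild(n.id)
	}
}

func main() {
	db := map[int64]*node{}
	parent := &node{id: 1, children: map[int64]*node{}, db: db}
	child := &node{id: 2, children: map[int64]*node{}, parent: parent, db: db}
	db[1], db[2] = parent, child
	parent.children[2] = child

	parent.triggerDelete() // held: the child is still registered
	fmt.Println(len(db))   // 2
	child.triggerDelete()  // child deletes, then cascades to the parent
	fmt.Println(len(db))   // 0
}

The same cascade explains why deleteChild calls deleteSelfIfReady: removing the last child may finally let a close-pending parent leave the database.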
+	deleteSelfIfReady()
+	// getParentID returns the parent ID of the entry. A parent ID of 0 means no parent.
+	getParentID() int64
+}
+
+// dummyEntry is a fake entry to handle the entry-not-found case.
+type dummyEntry struct {
+	idNotFound int64
+}
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under a race condition.
+	// For example, there could be a race between ClientConn.Close() info being propagated
+	// to addrConn and http2Client. ClientConn.Close() cancels the context, causing
+	// http2Client to fail. The error is then caught by the transport monitor
+	// before addrConn.tearDown() is called inside ClientConn.Close(). Therefore,
+	// the addrConn will create a new transport. And when registering the new transport in
+	// channelz, its parent addrConn could have already been torn down and deleted
+	// from channelz tracking, and thus reach the code here.
+	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under a race condition.
+	// Refer to the example described in addChild().
+	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+func (*dummyEntry) getParentID() int64 {
+	return 0
+}
+
+// ChannelMetric defines the info channelz provides for a specific Channel, which
+// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ChannelMetric struct {
+	// ID is the channelz id of this channel.
+	ID int64
+	// RefName is the human readable reference string of this channel.
+	RefName string
+	// ChannelData contains channel internal metric reported by the channel through
+	// ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this channel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this channel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this channel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a channel to have sockets directly;
+	// therefore, this field is unused.
+	Sockets map[int64]string
+	// Trace contains the most recent traced events.
+	Trace *ChannelTrace
+}
+
+// SubChannelMetric defines the info channelz provides for a specific SubChannel,
+// which includes ChannelInternalMetric and channelz-specific data, such as
+// channelz id, child list, etc.
+type SubChannelMetric struct {
+	// ID is the channelz id of this subchannel.
+	ID int64
+	// RefName is the human readable reference string of this subchannel.
+	RefName string
+	// ChannelData contains subchannel internal metric reported by the subchannel
+	// through ChannelzMetric().
+	ChannelData *ChannelInternalMetric
+	// NestedChans tracks the nested channel type children of this subchannel in the format of
+	// a map from nested channel channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a subchannel to have nested channels
+	// as children; therefore, this field is unused.
+	NestedChans map[int64]string
+	// SubChans tracks the subchannel type children of this subchannel in the format of a
+	// map from subchannel channelz id to corresponding reference string.
+	// Note: the current grpc implementation doesn't allow a subchannel to have subchannels
+	// as children; therefore, this field is unused.
+	SubChans map[int64]string
+	// Sockets tracks the socket type children of this subchannel in the format of a map
+	// from socket channelz id to corresponding reference string.
+	Sockets map[int64]string
+	// Trace contains the most recent traced events.
+	Trace *ChannelTrace
+}
+
+// ChannelInternalMetric defines the struct that the implementor of the Channel interface
+// should return from ChannelzMetric().
+type ChannelInternalMetric struct {
+	// The current connectivity state of the channel.
+	State connectivity.State
+	// The target this channel originally tried to connect to. May be absent.
+	Target string
+	// The number of calls started on the channel.
+	CallsStarted int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp time.Time
+}
+
+// ChannelTrace stores traced events on a channel/subchannel and related info.
+type ChannelTrace struct {
+	// EventNum is the number of events that have ever been traced (i.e. including those that have been deleted).
+	EventNum int64
+	// CreationTime is the creation time of the trace.
+	CreationTime time.Time
+	// Events stores the most recent trace events (up to $maxTraceEntry; a newer event will overwrite the
+	// oldest one).
+	Events []*TraceEvent
+}
+
+// TraceEvent represents a single trace event.
+type TraceEvent struct {
+	// Desc is a simple description of the trace event.
+	Desc string
+	// Severity states the severity of this trace event.
+	Severity Severity
+	// Timestamp is the event time.
+	Timestamp time.Time
+	// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
+	// involved in this event.
+	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
+	RefID int64
+	// RefName is the reference name for the entity that gets referenced in the event.
+	RefName string
+	// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
+	RefType RefChannelType
+}
+
+// Channel is the interface that should be satisfied in order to be tracked by
+// channelz as Channel or SubChannel.
+type Channel interface {
+	ChannelzMetric() *ChannelInternalMetric
+}
+
+type dummyChannel struct{}
+
+func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
+	return &ChannelInternalMetric{}
+}
+
+type channel struct {
+	refName     string
+	c           Channel
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans    map[int64]string
+	id          int64
+	pid         int64
+	cm          *channelMap
+	trace       *channelTrace
+	// traceRefCount is the number of trace events that reference this channel.
+	// A non-zero traceRefCount means the trace of this channel cannot be deleted.
+ traceRefCount int32 +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) getParentID() int64 { + return c.pid +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (c *channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (c *channel) deleteSelfFromMap() (delete bool) { + if c.getTraceRefCount() != 0 { + c.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. 
+func (c *channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + c.cm.deleteEntry(c.id) + c.trace.clear() +} + +func (c *channel) getChannelTrace() *channelTrace { + return c.trace +} + +func (c *channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *channel) getRefName() string { + return c.refName +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap + trace *channelTrace + traceRefCount int32 +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) getParentID() int64 { + return sc.pid +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from tree. +func (sc *subChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.cm.findEntry(sc.pid).deleteChild(sc.id) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is +// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the channel can be safely deleted from map. +func (sc *subChannel) deleteSelfFromMap() (delete bool) { + if sc.getTraceRefCount() != 0 { + // free the grpc struct (i.e. addrConn) + sc.c = &dummyChannel{} + return false + } + return true +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. 
+func (sc *subChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + sc.cm.deleteEntry(sc.id) + sc.trace.clear() +} + +func (sc *subChannel) getChannelTrace() *channelTrace { + return sc.trace +} + +func (sc *subChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *subChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *subChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *subChannel) getRefName() string { + return sc.refName +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + SocketOptions *SocketOptionData + Security credentials.ChannelzSecurityValue +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. 
+type Socket interface {
+	ChannelzMetric() *SocketInternalMetric
+}
+
+type listenSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ls *listenSocket) addChild(id int64, e entry) {
+	logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+}
+
+func (ls *listenSocket) deleteChild(id int64) {
+	logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+}
+
+func (ls *listenSocket) triggerDelete() {
+	ls.cm.deleteEntry(ls.id)
+	ls.cm.findEntry(ls.pid).deleteChild(ls.id)
+}
+
+func (ls *listenSocket) deleteSelfIfReady() {
+	logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
+}
+
+func (ls *listenSocket) getParentID() int64 {
+	return ls.pid
+}
+
+type normalSocket struct {
+	refName string
+	s       Socket
+	id      int64
+	pid     int64
+	cm      *channelMap
+}
+
+func (ns *normalSocket) addChild(id int64, e entry) {
+	logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
+}
+
+func (ns *normalSocket) deleteChild(id int64) {
+	logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
+}
+
+func (ns *normalSocket) triggerDelete() {
+	ns.cm.deleteEntry(ns.id)
+	ns.cm.findEntry(ns.pid).deleteChild(ns.id)
+}
+
+func (ns *normalSocket) deleteSelfIfReady() {
+	logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
+}
+
+func (ns *normalSocket) getParentID() int64 {
+	return ns.pid
+}
+
+// ServerMetric defines the info channelz provides for a specific Server, which
+// includes ServerInternalMetric and channelz-specific data, such as channelz id,
+// child list, etc.
+type ServerMetric struct {
+	// ID is the channelz id of this server.
+	ID int64
+	// RefName is the human readable reference string of this server.
+	RefName string
+	// ServerData contains server internal metric reported by the server through
+	// ChannelzMetric().
+	ServerData *ServerInternalMetric
+	// ListenSockets tracks the listener socket type children of this server in the
+	// format of a map from socket channelz id to corresponding reference string.
+	ListenSockets map[int64]string
+}
+
+// ServerInternalMetric defines the struct that the implementor of the Server interface
+// should return from ChannelzMetric().
+type ServerInternalMetric struct {
+	// The number of incoming calls started on the server.
+	CallsStarted int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed int64
+	// The last time a call was started on the server.
+	LastCallStartedTimestamp time.Time
+}
+
+// Server is the interface to be satisfied in order to be tracked by channelz as
+// Server.
+type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} + +func (s *server) getParentID() int64 { + return 0 +} + +type tracedChannel interface { + getChannelTrace() *channelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +type channelTrace struct { + cm *channelMap + clearCalled bool + createdTime time.Time + eventCount int64 + mu sync.Mutex + events []*TraceEvent +} + +func (c *channelTrace) append(e *TraceEvent) { + c.mu.Lock() + if len(c.events) == getMaxTraceEntry() { + del := c.events[0] + c.events = c.events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.events = append(c.events, e) + c.eventCount++ + c.mu.Unlock() +} + +func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true + c.mu.Lock() + for _, e := range c.events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. + CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota + // RefChannel indicates the referenced entity is a Channel. + RefChannel + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. 
+ RefNormalSocket +) + +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + +func (c *channelTrace) dumpData() *ChannelTrace { + c.mu.Lock() + ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} + ct.Events = c.events[:len(c.events)] + c.mu.Unlock() + return ct +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go new file mode 100644 index 0000000000..1b1c4cce34 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// SocketOptionData defines the struct to hold socket option data, and related +// getter function to obtain info from fd. +type SocketOptionData struct { + Linger *unix.Linger + RecvTimeout *unix.Timeval + SendTimeout *unix.Timeval + TCPInfo *unix.TCPInfo +} + +// Getsockopt defines the function to get socket options requested by channelz. +// It is to be passed to syscall.RawConn.Control(). +func (s *SocketOptionData) Getsockopt(fd uintptr) { + if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil { + s.Linger = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil { + s.RecvTimeout = v + } + if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil { + s.SendTimeout = v + } + if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil { + s.TCPInfo = v + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go new file mode 100644 index 0000000000..8b06eed1ab --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go @@ -0,0 +1,43 @@ +//go:build !linux +// +build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
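The channelTrace.append method shown earlier implements a bounded trace window: eventCount grows monotonically while the events slice holds only the newest getMaxTraceEntry() entries. A stripped-down sketch of that eviction policy, with no locking or reference counting, and maxEntries standing in for getMaxTraceEntry():

package main

import (
	"fmt"
	"time"
)

const maxEntries = 3 // stand-in for getMaxTraceEntry()

type traceEvent struct {
	Desc      string
	Timestamp time.Time
}

// boundedTrace mirrors channelTrace.append's eviction policy: once the
// buffer is full, the oldest event is dropped to make room (the real code
// additionally decrements a trace ref count for evicted events).
type boundedTrace struct {
	eventCount int64
	events     []*traceEvent
}

func (t *boundedTrace) append(e *traceEvent) {
	if len(t.events) == maxEntries {
		t.events = t.events[1:] // evict the oldest event
	}
	e.Timestamp = time.Now()
	t.events = append(t.events, e)
	t.eventCount++ // total ever traced, including evicted events
}

func main() {
	tr := &boundedTrace{}
	for i := 1; i <= 5; i++ {
		tr.append(&traceEvent{Desc: fmt.Sprintf("event %d", i)})
	}
	fmt.Println(tr.eventCount, len(tr.events)) // prints: 5 3
}

dumpData reports both numbers, so a reader can tell how many events were evicted from the window.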
+ *
+ */
+
+package channelz
+
+import (
+	"sync"
+)
+
+var once sync.Once
+
+// SocketOptionData defines the struct to hold socket option data, and related
+// getter function to obtain info from fd.
+// Socket options are not supported on non-linux environments.
+type SocketOptionData struct {
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux environments.
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+	once.Do(func() {
+		logger.Warning("Channelz: socket options are not supported on non-linux environments")
+	})
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
new file mode 100644
index 0000000000..98288c3f86
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
@@ -0,0 +1,37 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"syscall"
+)
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket any) *SocketOptionData {
+	c, ok := socket.(syscall.Conn)
+	if !ok {
+		return nil
+	}
+	data := &SocketOptionData{}
+	if rawConn, err := c.SyscallConn(); err == nil {
+		rawConn.Control(data.Getsockopt)
+		return data
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
new file mode 100644
index 0000000000..b5568b22e2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
@@ -0,0 +1,27 @@
+//go:build !linux
+// +build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(c any) *SocketOptionData {
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
new file mode 100644
index 0000000000..9deee7f651
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
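GetSocketOption in util_linux.go above shows the standard pattern for reading socket options from a net.Conn: assert the conn to syscall.Conn, obtain a syscall.RawConn, and run getsockopt calls inside Control, which lends out the fd without transferring ownership. A hedged, Unix-only sketch that reads a single option (SO_RCVBUF) rather than the full set channelz collects:

package main

import (
	"fmt"
	"net"
	"syscall"
)

// readRcvBuf mirrors the channelz pattern: assert the conn to syscall.Conn,
// then run a getsockopt call under RawConn.Control, which hands us the fd
// without transferring ownership. Unix-only; readRcvBuf is a hypothetical
// helper, not part of the package.
func readRcvBuf(conn net.Conn) (int, error) {
	sc, ok := conn.(syscall.Conn)
	if !ok {
		return 0, fmt.Errorf("conn does not expose a raw fd")
	}
	raw, err := sc.SyscallConn()
	if err != nil {
		return 0, err
	}
	var size int
	var sockErr error
	ctlErr := raw.Control(func(fd uintptr) {
		size, sockErr = syscall.GetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF)
	})
	if ctlErr != nil {
		return 0, ctlErr
	}
	return size, sockErr
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println(readRcvBuf(conn))
}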
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package credentials + +import ( + "context" +) + +// requestInfoKey is a struct to be used as the key to store RequestInfo in a +// context. +type requestInfoKey struct{} + +// NewRequestInfoContext creates a context with ri. +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { + return context.WithValue(ctx, requestInfoKey{}, ri) +} + +// RequestInfoFromContext extracts the RequestInfo from ctx. +func RequestInfoFromContext(ctx context.Context) any { + return ctx.Value(requestInfoKey{}) +} + +// clientHandshakeInfoKey is a struct used as the key to store +// ClientHandshakeInfo in a context. +type clientHandshakeInfoKey struct{} + +// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. +func ClientHandshakeInfoFromContext(ctx context.Context) any { + return ctx.Value(clientHandshakeInfoKey{}) +} + +// NewClientHandshakeInfoContext creates a context with chi. +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { + return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go new file mode 100644 index 0000000000..25ade62305 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials defines APIs for parsing SPIFFE ID. +// +// All APIs in this package are experimental. +package credentials + +import ( + "crypto/tls" + "crypto/x509" + "net/url" + + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("credentials") + +// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format +// is invalid, return nil with warning. +func SPIFFEIDFromState(state tls.ConnectionState) *url.URL { + if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 { + return nil + } + return SPIFFEIDFromCert(state.PeerCertificates[0]) +} + +// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE +// ID format is invalid, return nil with warning. +func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL { + if cert == nil || cert.URIs == nil { + return nil + } + var spiffeID *url.URL + for _, uri := range cert.URIs { + if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") { + continue + } + // From this point, we assume the uri is intended for a SPIFFE ID. 
+ if len(uri.String()) > 2048 { + logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes") + return nil + } + if len(uri.Host) == 0 || len(uri.Path) == 0 { + logger.Warning("invalid SPIFFE ID: domain or workload ID is empty") + return nil + } + if len(uri.Host) > 255 { + logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters") + return nil + } + // A valid SPIFFE certificate can only have exactly one URI SAN field. + if len(cert.URIs) > 1 { + logger.Warning("invalid SPIFFE ID: multiple URI SANs") + return nil + } + spiffeID = uri + } + return spiffeID +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go new file mode 100644 index 0000000000..2919632d65 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go @@ -0,0 +1,58 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "net" + "syscall" +) + +type sysConn = syscall.Conn + +// syscallConn keeps reference of rawConn to support syscall.Conn for channelz. +// SyscallConn() (the method in interface syscall.Conn) is explicitly +// implemented on this type, +// +// Interface syscall.Conn is implemented by most net.Conn implementations (e.g. +// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns +// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn +// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't +// help here). +type syscallConn struct { + net.Conn + // sysConn is a type alias of syscall.Conn. It's necessary because the name + // `Conn` collides with `net.Conn`. + sysConn +} + +// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that +// implements syscall.Conn. rawConn will be used to support syscall, and newConn +// will be used for read/write. +// +// This function returns newConn if rawConn doesn't implement syscall.Conn. +func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn { + sysConn, ok := rawConn.(syscall.Conn) + if !ok { + return newConn + } + return &syscallConn{ + Conn: newConn, + sysConn: sysConn, + } +} diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go new file mode 100644 index 0000000000..f792fd22ca --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/credentials/util.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
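SPIFFEIDFromCert above rejects a URI SAN unless it has the spiffe scheme, no opaque part or user info, a non-empty trust domain and workload path, and stays under the documented length limits. The same shape checks applied to a bare URL, as a runnable sketch (validSPIFFEID is a hypothetical helper, not part of the package; the real code additionally rejects certificates carrying more than one URI SAN):

package main

import (
	"fmt"
	"net/url"
)

// validSPIFFEID applies the shape checks SPIFFEIDFromCert performs on a
// URI SAN: spiffe scheme, no opaque part or user info, a non-empty trust
// domain and workload path, and the documented length limits.
func validSPIFFEID(u *url.URL) bool {
	if u == nil || u.Scheme != "spiffe" || u.Opaque != "" || (u.User != nil && u.User.Username() != "") {
		return false
	}
	if len(u.String()) > 2048 || len(u.Host) == 0 || len(u.Host) > 255 || len(u.Path) == 0 {
		return false
	}
	return true
}

func main() {
	good, _ := url.Parse("spiffe://example.org/workload/api")
	bad, _ := url.Parse("spiffe://example.org") // missing workload path
	fmt.Println(validSPIFFEID(good), validSPIFFEID(bad)) // prints: true false
}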
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +const alpnProtoStrH2 = "h2" + +// AppendH2ToNextProtos appends h2 to next protos. +func AppendH2ToNextProtos(ps []string) []string { + for _, p := range ps { + if p == alpnProtoStrH2 { + return ps + } + } + ret := make([]string, 0, len(ps)+1) + ret = append(ret, ps...) + return append(ret, alpnProtoStrH2) +} + +// CloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +// +// TODO: inline this function if possible. +func CloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go new file mode 100644 index 0000000000..3cf10ddfbd --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package envconfig contains grpc settings configured by environment variables. +package envconfig + +import ( + "os" + "strconv" + "strings" +) + +var ( + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). + AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the + // pick_first LB policy. + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) +) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". 
+ return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". + return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go new file mode 100644 index 0000000000..dd314cfb18 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -0,0 +1,42 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import "os" + +const ( + envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG" + envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE" +) + +var ( + // ObservabilityConfig is the json configuration for the gcp/observability + // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfig = os.Getenv(envObservabilityConfig) + // ObservabilityConfigFile is the json configuration for the + // gcp/observability specified in a file with the location specified in + // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) +) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go new file mode 100644 index 0000000000..02b4b6a1c1 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package envconfig + +import ( + "os" +) + +const ( + // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. + // Do not use this and read from env directly. Its value is read and kept in + // variable XDSBootstrapFileName. + // + // When both bootstrap FileName and FileContent are set, FileName is used. + XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file + // content. 
Do not use this and read from env directly. Its value is read
+	// and kept in variable XDSBootstrapFileContent.
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
+)
+
+var (
+	// XDSBootstrapFileName holds the name of the file which contains xDS
+	// bootstrap configuration. Users can specify the location of the bootstrap
+	// file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
+	// XDSBootstrapFileContent holds the content of the xDS bootstrap
+	// configuration. Users can specify the bootstrap config by setting the
+	// environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
+	// XDSRingHash indicates whether ring hash support is enabled, which can be
+	// disabled by setting the environment variable
+	// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
+	XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true)
+	// XDSClientSideSecurity is used to control processing of security
+	// configuration on the client-side.
+	//
+	// Note that there is no env var protection for the server-side because we
+	// have a brand new API on the server-side and users explicitly need to use
+	// the new API to get security integration on the server.
+	XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
+	// XDSAggregateAndDNS indicates whether processing of aggregated cluster and
+	// DNS cluster is enabled, which can be disabled by setting the environment
+	// variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
+	// to "false".
+	XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)
+
+	// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
+	// which can be disabled by setting the environment variable
+	// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
+	XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true)
+	// XDSOutlierDetection indicates whether outlier detection support is
+	// enabled, which can be disabled by setting the environment variable
+	// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false".
+	XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true)
+	// XDSFederation indicates whether federation support is enabled, which can
+	// be disabled by setting the environment variable
+	// "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "false".
+	XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)
+
+	// XDSRLS indicates whether processing of Cluster Specifier plugins and
+	// support for the RLS Cluster Specifier is enabled, which can be disabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
+	// "false".
+	XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)
+
+	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
+	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+	// XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
+	// can be disabled by setting the environment variable
+	// "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
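All of the xDS toggles above funnel through the two helpers defined earlier in envconfig.go. To make their semantics concrete, the sketch below copies those helpers into a standalone program: a boolean only flips away from its default on an explicit, case-insensitive opposite value, and an integer is clamped into [min, max]. DEMO_FLAG and DEMO_CAP are made-up variable names:

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// boolFromEnv and uint64FromEnv copy the semantics of the helpers in
// envconfig.go so they can run standalone here.
func boolFromEnv(envVar string, def bool) bool {
	if def {
		// Default true: stay true unless the variable is explicitly "false".
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	// Default false: stay false unless the variable is explicitly "true".
	return strings.EqualFold(os.Getenv(envVar), "true")
}

func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
	if err != nil {
		return def // unset or unparsable: fall back to the default
	}
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}

func main() {
	os.Setenv("DEMO_FLAG", "FALSE") // matching is case-insensitive
	os.Setenv("DEMO_CAP", "999999999")
	fmt.Println(boolFromEnv("DEMO_FLAG", true))                  // false
	fmt.Println(uint64FromEnv("DEMO_CAP", 4096, 1, 8*1024*1024)) // clamped to 8388608
}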
+ XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) +) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 0000000000..bfc45102ab --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +import ( + "os" +) + +// Logger is the logger used for the non-depth log functions. +var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Infoln(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warningln(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Errorln(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...any) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatalln(args...) + } + os.Exit(1) +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...any) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...any) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...any) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...any) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...any) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...any) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...any) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...any) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...any) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). 
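The InfoDepth/WarningDepth/ErrorDepth helpers above all share one dispatch rule: prefer a registered depth-aware logger, so a log line is attributed to the caller's stack frame, and otherwise fall back to the plain logger. A trimmed sketch of that rule (both interfaces here are cut-down stand-ins, not the real LoggerV2/DepthLoggerV2):

package main

import "log"

// plainLogger and depthAware are trimmed stand-ins for the package's
// LoggerV2 and DepthLoggerV2 interfaces.
type plainLogger interface{ Infoln(args ...any) }
type depthAware interface{ InfoDepth(depth int, args ...any) }

type stdLogger struct{}

func (stdLogger) Infoln(args ...any) { log.Println(args...) }

var (
	logger      plainLogger = stdLogger{}
	depthLogger depthAware  // nil until a depth-aware logger is installed
)

// infoDepth mirrors the dispatch in grpclog.InfoDepth: use the depth-aware
// logger when available, else fall back to the plain logger.
func infoDepth(depth int, args ...any) {
	if depthLogger != nil {
		depthLogger.InfoDepth(depth, args...)
		return
	}
	logger.Infoln(args...)
}

func main() {
	infoDepth(1, "no depth logger installed; using plain fallback")
}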
+ // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...any) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...any) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...any) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. + InfoDepth(depth int, args ...any) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. + WarningDepth(depth int, args ...any) + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. + ErrorDepth(depth int, args ...any) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. + FatalDepth(depth int, args ...any) +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go new file mode 100644 index 0000000000..faa998de76 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +import ( + "fmt" +) + +// PrefixLogger does logging with a prefix. +// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { + logger DepthLoggerV2 + prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...any) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) + return + } + WarningDepth(1, fmt.Sprintf(format, args...)) +} + +// Errorf does error logging. 
+func (pl *PrefixLogger) Errorf(format string, args ...any) { + if pl != nil { + format = pl.prefix + format + pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) + return + } + ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...any) { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + if !Logger.V(2) { + return + } + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + pl.logger.InfoDepth(1, fmt.Sprintf(format, args...)) + return + } + InfoDepth(1, fmt.Sprintf(format, args...)) + +} + +// V reports whether verbosity level l is at least the requested verbose level. +func (pl *PrefixLogger) V(l int) bool { + // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe + // rewrite PrefixLogger a little to ensure that we don't use the global + // `Logger` here, and instead use the `logger` field. + return Logger.V(l) +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger { + return &PrefixLogger{logger: logger, prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go new file mode 100644 index 0000000000..aa97273e7d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go @@ -0,0 +1,95 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcrand implements math/rand functions in a concurrent-safe way +// with a global random source, independent of math/rand's global source. +package grpcrand + +import ( + "math/rand" + "sync" + "time" +) + +var ( + r = rand.New(rand.NewSource(time.Now().UnixNano())) + mu sync.Mutex +) + +// Int implements rand.Int on the grpcrand global source. +func Int() int { + mu.Lock() + defer mu.Unlock() + return r.Int() +} + +// Int63n implements rand.Int63n on the grpcrand global source. +func Int63n(n int64) int64 { + mu.Lock() + defer mu.Unlock() + return r.Int63n(n) +} + +// Intn implements rand.Intn on the grpcrand global source. +func Intn(n int) int { + mu.Lock() + defer mu.Unlock() + return r.Intn(n) +} + +// Int31n implements rand.Int31n on the grpcrand global source. +func Int31n(n int32) int32 { + mu.Lock() + defer mu.Unlock() + return r.Int31n(n) +} + +// Float64 implements rand.Float64 on the grpcrand global source. +func Float64() float64 { + mu.Lock() + defer mu.Unlock() + return r.Float64() +} + +// Uint64 implements rand.Uint64 on the grpcrand global source. +func Uint64() uint64 { + mu.Lock() + defer mu.Unlock() + return r.Uint64() +} + +// Uint32 implements rand.Uint32 on the grpcrand global source. 
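The grpcrand file above guards a private rand.Rand with a mutex, so callers get concurrency-safe randomness without reseeding or contending on math/rand's global source. A self-contained sketch of the same pattern, applied to the kind of backoff jitter gRPC computes; the jitter helper and its parameters are hypothetical:

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

var (
	mu sync.Mutex
	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
)

// float64Locked mirrors grpcrand.Float64: one lock per draw.
func float64Locked() float64 {
	mu.Lock()
	defer mu.Unlock()
	return r.Float64()
}

// jitter scales d by a random factor in [1-f, 1+f] to spread out retries.
func jitter(d time.Duration, f float64) time.Duration {
	scale := 1 - f + 2*f*float64Locked()
	return time.Duration(float64(d) * scale)
}

func main() {
	fmt.Println(jitter(time.Second, 0.2)) // e.g. 1.073s
}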
+func Uint32() uint32 { + mu.Lock() + defer mu.Unlock() + return r.Uint32() +} + +// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source. +func ExpFloat64() float64 { + mu.Lock() + defer mu.Unlock() + return r.ExpFloat64() +} + +// Shuffle implements rand.Shuffle on the grpcrand global source. +var Shuffle = func(n int, f func(int, int)) { + mu.Lock() + defer mu.Unlock() + r.Shuffle(n, f) +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go new file mode 100644 index 0000000000..900917dbe6 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -0,0 +1,125 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/buffer" +) + +// CallbackSerializer provides a mechanism to schedule callbacks in a +// synchronized manner. It provides a FIFO guarantee on the order of execution +// of scheduled callbacks. New callbacks can be scheduled by invoking the +// Schedule() method. +// +// This type is safe for concurrent access. +type CallbackSerializer struct { + // done is closed once the serializer is shut down completely, i.e all + // scheduled callbacks are executed and the serializer has deallocated all + // its resources. + done chan struct{} + + callbacks *buffer.Unbounded + closedMu sync.Mutex + closed bool +} + +// NewCallbackSerializer returns a new CallbackSerializer instance. The provided +// context will be passed to the scheduled callbacks. Users should cancel the +// provided context to shutdown the CallbackSerializer. It is guaranteed that no +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. +func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { + cs := &CallbackSerializer{ + done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } + go cs.run(ctx) + return cs +} + +// Schedule adds a callback to be scheduled after existing callbacks are run. +// +// Callbacks are expected to honor the context when performing any blocking +// operations, and should return early when the context is canceled. +// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + cs.closedMu.Lock() + defer cs.closedMu.Unlock() + + if cs.closed { + return false + } + cs.callbacks.Put(f) + return true +} + +func (cs *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + + defer close(cs.done) + for ctx.Err() == nil { + select { + case <-ctx.Done(): + // Do nothing here. 
Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case callback, ok := <-cs.callbacks.Get(): + if !ok { + return + } + cs.callbacks.Load() + callback.(func(ctx context.Context))(ctx) + } + } + + // Fetch pending callbacks if any, and execute them before returning from + // this method and closing cs.done. + cs.closedMu.Lock() + cs.closed = true + backlog = cs.fetchPendingCallbacks() + cs.callbacks.Close() + cs.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } +} + +func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { + case b := <-cs.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) + cs.callbacks.Load() + default: + return backlog + } + } +} + +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go new file mode 100644 index 0000000000..fbe697c376 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go @@ -0,0 +1,61 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcsync implements additional synchronization primitives built upon +// the sync package. +package grpcsync + +import ( + "sync" + "sync/atomic" +) + +// Event represents a one-time event that may occur in the future. +type Event struct { + fired int32 + c chan struct{} + o sync.Once +} + +// Fire causes e to complete. It is safe to call multiple times, and +// concurrently. It returns true iff this call to Fire caused the signaling +// channel returned by Done to close. +func (e *Event) Fire() bool { + ret := false + e.o.Do(func() { + atomic.StoreInt32(&e.fired, 1) + close(e.c) + ret = true + }) + return ret +} + +// Done returns a channel that will be closed when Fire is called. +func (e *Event) Done() <-chan struct{} { + return e.c +} + +// HasFired returns true if Fire has been called. +func (e *Event) HasFired() bool { + return atomic.LoadInt32(&e.fired) == 1 +} + +// NewEvent returns a new, ready-to-use Event. +func NewEvent() *Event { + return &Event{c: make(chan struct{})} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 0000000000..6635f7bca9 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
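A short usage sketch for the Event type defined above. Since the package is internal, the sketch is written as a hypothetical in-package function; Event is a fire-once, level-triggered signal whose Done channel can be selected on like a context's:

package grpcsync

import "fmt"

// exampleEventUsage is a hypothetical in-package illustration.
func exampleEventUsage() {
	ev := NewEvent()
	go func() {
		if ev.Fire() { // only the call that actually closes Done returns true
			fmt.Println("fired")
		}
		ev.Fire() // idempotent: later calls are no-ops returning false
	}()
	<-ev.Done() // unblocks once Fire has run
	fmt.Println("has fired:", ev.HasFired())
}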
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 0000000000..aef8cec1ab --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,121 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. +type Subscriber interface { + // OnMessage is invoked when a new message is published. Implementations + // must not block in this method. + OnMessage(msg any) +} + +// PubSub is a simple one-to-many publish-subscribe system that supports +// messages of arbitrary type. It guarantees that messages are delivered in +// the same order in which they were published. +// +// Publisher invokes the Publish() method to publish new messages, while +// subscribers interested in receiving these messages register a callback +// via the Subscribe() method. +// +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. +type PubSub struct { + cs *CallbackSerializer + + // Access to the below fields are guarded by this mutex. + mu sync.Mutex + msg any + subscribers map[Subscriber]bool +} + +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { + return &PubSub{ + cs: NewCallbackSerializer(ctx), + subscribers: map[Subscriber]bool{}, + } +} + +// Subscribe registers the provided Subscriber to the PubSub. +// +// If the PubSub contains a previously published message, the Subscriber's +// OnMessage() callback will be invoked asynchronously with the existing +// message to begin with, and subsequently for every newly published message. +// +// The caller is responsible for invoking the returned cancel function to +// unsubscribe itself from the PubSub. 
+func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.subscribers[sub] = true + + if ps.msg != nil { + msg := ps.msg + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[sub] { + return + } + sub.OnMessage(msg) + }) + } + + return func() { + ps.mu.Lock() + defer ps.mu.Unlock() + delete(ps.subscribers, sub) + } +} + +// Publish publishes the provided message to the PubSub, and invokes +// callbacks registered by subscribers asynchronously. +func (ps *PubSub) Publish(msg any) { + ps.mu.Lock() + defer ps.mu.Unlock() + + ps.msg = msg + for sub := range ps.subscribers { + s := sub + ps.cs.Schedule(func(context.Context) { + ps.mu.Lock() + defer ps.mu.Unlock() + if !ps.subscribers[s] { + return + } + s.OnMessage(msg) + }) + } +} + +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 0000000000..9f40909679 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go new file mode 100644 index 0000000000..b25b0baec3 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
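A usage sketch for PubSub, again written as hypothetical in-package code because the package is internal. The stateWatcher subscriber and the message values are illustrative; note that OnMessage runs on the serializer's goroutine and therefore must not block:

package grpcsync

import (
	"context"
	"fmt"
)

// stateWatcher is a hypothetical Subscriber.
type stateWatcher struct{ name string }

func (w *stateWatcher) OnMessage(msg any) {
	fmt.Printf("%s saw %v\n", w.name, msg)
}

// examplePubSubUsage is a hypothetical in-package illustration.
func examplePubSubUsage() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := NewPubSub(ctx)

	unsub := ps.Subscribe(&stateWatcher{name: "w1"})
	defer unsub()

	ps.Publish("READY") // delivered asynchronously, in publish order

	// A late subscriber is immediately replayed the latest message.
	ps.Subscribe(&stateWatcher{name: "w2"})

	cancel()    // stop accepting new work; queued callbacks still drain
	<-ps.Done() // wait until every callback has run
}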
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strconv" + "time" +) + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if d%r > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// EncodeDuration encodes the duration to the format grpc-timeout header +// accepts. +// +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +func EncodeDuration(t time.Duration) string { + // TODO: This is simplistic and not bandwidth efficient. Improve it. + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go new file mode 100644 index 0000000000..e2f948e8f4 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go @@ -0,0 +1,20 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides utility functions used across the gRPC codebase. +package grpcutil diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go new file mode 100644 index 0000000000..6f22bd8911 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "context" + + "google.golang.org/grpc/metadata" +) + +type mdExtraKey struct{} + +// WithExtraMetadata creates a new context with incoming md attached. 
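EncodeDuration above picks the smallest time unit whose count still fits in eight ASCII digits (maxTimeoutValue), so precision degrades gracefully as timeouts grow. A few worked values, as a hypothetical in-package function:

package grpcutil

import (
	"fmt"
	"time"
)

// exampleEncodeDuration is a hypothetical in-package illustration.
func exampleEncodeDuration() {
	fmt.Println(EncodeDuration(50 * time.Millisecond)) // "50000000n": 5e7 ns fits in 8 digits
	fmt.Println(EncodeDuration(10 * time.Second))      // "10000000u": 1e10 ns overflows, 1e7 us fits
	fmt.Println(EncodeDuration(3 * time.Hour))         // "10800000m": milliseconds are the first unit to fit
	fmt.Println(EncodeDuration(-time.Second))          // "0n": non-positive durations clamp to zero
}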
+func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context { + return context.WithValue(ctx, mdExtraKey{}, md) +} + +// ExtraMetadata returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) { + md, ok = ctx.Value(mdExtraKey{}).(metadata.MD) + return +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go new file mode 100644 index 0000000000..ec62b4775e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -0,0 +1,88 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "errors" + "strings" +) + +// ParseMethod splits service and method from the input. It expects format +// "/service/method". +func ParseMethod(methodName string) (service, method string, _ error) { + if !strings.HasPrefix(methodName, "/") { + return "", "", errors.New("invalid method name: should start with /") + } + methodName = methodName[1:] + + pos := strings.LastIndex(methodName, "/") + if pos < 0 { + return "", "", errors.New("invalid method name: suffix /method is missing") + } + return methodName[:pos], methodName[pos+1:], nil +} + +// baseContentType is the base content-type for gRPC. This is a valid +// content-type on it's own, but can also include a content-subtype such as +// "proto" as a suffix after "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests +// for more details. +const baseContentType = "application/grpc" + +// ContentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. 
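A usage sketch for ParseMethod defined above, as a hypothetical in-package function with illustrative inputs:

package grpcutil

import "fmt"

// exampleParseMethod is a hypothetical in-package illustration.
func exampleParseMethod() {
	svc, m, err := ParseMethod("/helloworld.Greeter/SayHello")
	fmt.Println(svc, m, err) // helloworld.Greeter SayHello <nil>

	// The split happens at the last slash, so slashes may appear in the
	// service portion.
	svc, m, _ = ParseMethod("/a/b/c")
	fmt.Println(svc, m) // a/b c

	_, _, err = ParseMethod("no-leading-slash")
	fmt.Println(err) // invalid method name: should start with /
}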
+func ContentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// ContentType builds full content type with the given sub-type. +// +// contentSubtype is assumed to be lowercase +func ContentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go new file mode 100644 index 0000000000..7a092b2b80 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -0,0 +1,31 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import "regexp" + +// FullMatchWithRegex returns whether the full text matches the regex provided. +func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) + } + re.Longest() + rem := re.FindString(text) + return len(rem) == len(text) +} diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go new file mode 100644 index 0000000000..6c272476e5 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -0,0 +1,301 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. 
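A quick demonstration of the content-subtype rules described above, as a hypothetical in-package function with expected results in comments:

package grpcutil

import "fmt"

// exampleContentSubtype is a hypothetical in-package illustration.
func exampleContentSubtype() {
	fmt.Println(ContentSubtype("application/grpc"))       // "" true: valid, no subtype
	fmt.Println(ContentSubtype("application/grpc+proto")) // "proto" true
	fmt.Println(ContentSubtype("application/grpc;json"))  // "json" true
	fmt.Println(ContentSubtype("application/json"))       // "" false: not a gRPC content-type
	fmt.Println(ContentType("proto"))                     // "application/grpc+proto"
	fmt.Println(ContentType(""))                          // "application/grpc"
}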
+type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() error +} + +// Manager defines the functionality required to track RPC activity on a +// channel. +type Manager interface { + OnCallBegin() error + OnCallEnd() + Close() +} + +type noopManager struct{} + +func (noopManager) OnCallBegin() error { return nil } +func (noopManager) OnCallEnd() {} +func (noopManager) Close() {} + +// manager implements the Manager interface. It uses atomic operations to +// synchronize access to shared state and a mutex to guarantee mutual exclusion +// in a critical section. +type manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + logger grpclog.LoggerV2 + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// ManagerOptions is a collection of options used by +// NewManager. +type ManagerOptions struct { + Enforcer Enforcer + Timeout time.Duration + Logger grpclog.LoggerV2 +} + +// NewManager creates a new idleness manager implementation for the +// given idle timeout. +func NewManager(opts ManagerOptions) Manager { + if opts.Timeout == 0 { + return noopManager{} + } + + m := &manager{ + enforcer: opts.Enforcer, + timeout: int64(opts.Timeout), + logger: opts.Logger, + } + m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) + return m +} + +// resetIdleTimer resets the idle timer to the given duration. This method +// should only be called from the timer callback. +func (m *manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if m.timer == nil { + // Only close sets timer to nil. We are done. + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. + m.timer.Reset(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. 
+func (m *manager) handleIdleTimeout() {
+	if m.isClosed() {
+		return
+	}
+
+	if atomic.LoadInt32(&m.activeCallsCount) > 0 {
+		m.resetIdleTimer(time.Duration(m.timeout))
+		return
+	}
+
+	// There has been activity on the channel since we last got here. Reset the
+	// timer and return.
+	if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+		// Set the timer to fire after a duration of idle timeout, calculated
+		// from the time the most recent RPC completed.
+		atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
+		m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano()))
+		return
+	}
+
+	// This CAS operation is extremely likely to succeed given that there has
+	// been no activity since the last time we were here. Setting the
+	// activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the
+	// channel is either in idle mode or is trying to get there.
+	if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
+		// This CAS operation can fail if an RPC started after we checked for
+		// activity at the top of this method, or one was ongoing from before
+		// the last time we were here. In both cases, reset the timer and return.
+		m.resetIdleTimer(time.Duration(m.timeout))
+		return
+	}
+
+	// Now that we've set the active calls count to -math.MaxInt32, it's time to
+	// actually move to idle mode.
+	if m.tryEnterIdleMode() {
+		// Successfully entered idle mode. No timer needed until we exit idle.
+		return
+	}
+
+	// Failed to enter idle mode due to a concurrent RPC that kept the channel
+	// active, or because of an error from the channel. Undo the attempt to
+	// enter idle, and reset the timer to try again later.
+	atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+	m.resetIdleTimer(time.Duration(m.timeout))
+}
+
+// tryEnterIdleMode instructs the channel to enter idle mode. But before
+// that, it performs a last-minute check to ensure that no new RPC has come in,
+// making the channel active.
+//
+// Return value indicates whether or not the channel moved to idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with exitIdleMode.
+func (m *manager) tryEnterIdleMode() bool {
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+
+	if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
+		// We raced and lost to a new RPC. Very rare, but stop entering idle.
+		return false
+	}
+	if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+		// A very short RPC could have come in (and also finished) after we
+		// checked for calls count and activity in handleIdleTimeout(), but
+		// before the CAS operation. So, we need to check for activity again.
+		return false
+	}
+
+	// No new RPCs have come in since we last set the active calls count value
+	// -math.MaxInt32 in the timer callback. And since we have the lock, it is
+	// safe to enter idle mode now.
+	if err := m.enforcer.EnterIdleMode(); err != nil {
+		m.logger.Errorf("Failed to enter idle mode: %v", err)
+		return false
+	}
+
+	// Successfully entered idle mode.
+	m.actuallyIdle = true
+	return true
+}
+
+// OnCallBegin is invoked at the start of every RPC.
+func (m *manager) OnCallBegin() error {
+	if m.isClosed() {
+		return nil
+	}
+
+	if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
+		// Channel is not idle now. Set the activity bit and allow the call.
+		atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
+		return nil
+	}
+
+	// Channel is either in idle mode or is in the process of moving to idle
+	// mode. Attempt to exit idle mode to allow this RPC.
+	if err := m.exitIdleMode(); err != nil {
+		// Undo the increment to calls count, and return an error causing the
+		// RPC to fail.
+		atomic.AddInt32(&m.activeCallsCount, -1)
+		return err
+	}
+
+	atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
+	return nil
+}
+
+// exitIdleMode instructs the channel to exit idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
+func (m *manager) exitIdleMode() error {
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+
+	if !m.actuallyIdle {
+		// This can happen in two scenarios:
+		// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
+		//   tryEnterIdleMode(). But before the latter could grab the lock, an RPC
+		//   came in and OnCallBegin() noticed that the calls count is negative.
+		// - Channel is in idle mode, and multiple new RPCs come in at the same
+		//   time, all of them notice a negative calls count in OnCallBegin and get
+		//   here. The first one to get the lock would get the channel to exit idle.
+		//
+		// Either way, nothing to do here.
+		return nil
+	}
+
+	if err := m.enforcer.ExitIdleMode(); err != nil {
+		return fmt.Errorf("channel failed to exit idle mode: %v", err)
+	}
+
+	// Undo the idle entry process. This also respects any new RPC attempts.
+	atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+	m.actuallyIdle = false
+
+	// Start a new timer to fire after the configured idle timeout.
+	m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout)
+	return nil
+}
+
+// OnCallEnd is invoked at the end of every RPC.
+func (m *manager) OnCallEnd() {
+	if m.isClosed() {
+		return
+	}
+
+	// Record the time at which the most recent call finished.
+	atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano())
+
+	// Decrement the active calls count. This count can temporarily go negative
+	// when the timer callback is in the process of moving the channel to idle
+	// mode, but one or more RPCs come in and complete before the timer callback
+	// can get done with the process of moving to idle mode.
+	atomic.AddInt32(&m.activeCallsCount, -1)
+}
+
+func (m *manager) isClosed() bool {
+	return atomic.LoadInt32(&m.closed) == 1
+}
+
+func (m *manager) Close() {
+	atomic.StoreInt32(&m.closed, 1)
+
+	m.idleMu.Lock()
+	m.timer.Stop()
+	m.timer = nil
+	m.idleMu.Unlock()
+}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
new file mode 100644
index 0000000000..c8a8c76d62
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains gRPC-internal code, to avoid polluting
+// the godoc of the top-level grpc package. It must not import any grpc
+// symbols to avoid circular dependencies.
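A sketch of how a channel-like component wires up the idleness manager above, written as hypothetical in-package code; the fakeEnforcer is a stand-in for grpc.ClientConn, which implements Enforcer for the real channel:

package idle

import (
	"time"

	"google.golang.org/grpc/grpclog"
)

// fakeEnforcer is a hypothetical stand-in for the real channel.
type fakeEnforcer struct{}

func (fakeEnforcer) EnterIdleMode() error { return nil }
func (fakeEnforcer) ExitIdleMode() error  { return nil }

// exampleManagerUsage is a hypothetical in-package illustration.
func exampleManagerUsage() {
	m := NewManager(ManagerOptions{
		Enforcer: fakeEnforcer{},
		Timeout:  30 * time.Minute, // a Timeout of 0 yields a no-op manager
		Logger:   grpclog.Component("example"),
	})
	defer m.Close()

	// Each RPC is bracketed by OnCallBegin/OnCallEnd. OnCallBegin may first
	// have to take the channel out of idle mode, and fails the RPC if that
	// returns an error.
	if err := m.OnCallBegin(); err != nil {
		return
	}
	// ... issue the RPC ...
	m.OnCallEnd()
}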
+package internal + +import ( + "context" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // WithHealthCheckFunc is set by dialoptions.go + WithHealthCheckFunc any // func (HealthChecker) DialOption + // HealthCheckFunc is used to provide client-side LB channel health checking + HealthCheckFunc HealthChecker + // BalancerUnregister is exported by package balancer to unregister a balancer. + BalancerUnregister func(name string) + // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by + // default, but tests may wish to set it lower for convenience. + KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig any // func(string) *serviceconfig.ParseResult + // EqualServiceConfigForTesting is for testing service config generation and + // parsing. Both a and b should be returned by ParseServiceConfig. + // This function compares the config without rawJSON stripped, in case the + // there's difference in white space. + EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool + // GetCertificateProviderBuilder returns the registered builder for the + // given name. This is set by package certprovider for use from xDS + // bootstrap code while parsing certificate provider configs in the + // bootstrap file. + GetCertificateProviderBuilder any // func(string) certprovider.Builder + // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo + // stored in the passed in attributes. This is set by + // credentials/xds/xds.go. + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo + // GetServerCredentials returns the transport credentials configured on a + // gRPC server. An xDS-enabled server needs to know what type of credentials + // is configured on the underlying gRPC server. This is set by server.go. + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials + // CanonicalString returns the canonical string of the code defined here: + // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + CanonicalString any // func (codes.Code) string + // DrainServerTransports initiates a graceful close of existing connections + // on a gRPC server accepted on the provided listener address. An + // xDS-enabled server invokes this method on a grpc.Server when a particular + // listener moves to "not-serving" mode. + DrainServerTransports any // func(*grpc.Server, string) + // AddGlobalServerOptions adds an array of ServerOption that will be + // effective globally for newly created servers. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalServerOptions any // func(opt ...ServerOption) + // ClearGlobalServerOptions clears the array of extra ServerOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. 
+ ClearGlobalServerOptions func() + // AddGlobalDialOptions adds an array of DialOption that will be effective + // globally for newly created client channels. The priority will be: 1. + // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + AddGlobalDialOptions any // func(opt ...DialOption) + // DisableGlobalDialOptions returns a DialOption that prevents the + // ClientConn from applying the global DialOptions (set via + // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + DisableGlobalDialOptions any // func() grpc.DialOption + // ClearGlobalDialOptions clears the array of extra DialOption. This + // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption + // JoinServerOptions combines the server options passed as arguments into a + // single server option. + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption + + // WithBinaryLogger returns a DialOption that specifies the binary logger + // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption + // BinaryLogger returns a ServerOption that can set the binary logger for a + // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) + + // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using + // the provided xds bootstrap config instead of the global configuration from + // the supported environment variables. The resolver.Builder is meant to be + // used in conjunction with the grpc.WithResolvers DialOption. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + + // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster + // Specifier Plugin for testing purposes, regardless of the XDSRLS environment + // variable. + // + // TODO: Remove this function once the RLS env var is removed. + RegisterRLSClusterSpecifierPluginForTesting func() + + // UnregisterRLSClusterSpecifierPluginForTesting unregisters the RLS Cluster + // Specifier Plugin for testing purposes. This is needed because there is no way + // to unregister the RLS Cluster Specifier Plugin after registering it solely + // for testing purposes using RegisterRLSClusterSpecifierPluginForTesting(). + // + // TODO: Remove this function once the RLS env var is removed. + UnregisterRLSClusterSpecifierPluginForTesting func() + + // RegisterRBACHTTPFilterForTesting registers the RBAC HTTP Filter for testing + // purposes, regardless of the RBAC environment variable. 
+ // + // TODO: Remove this function once the RBAC env var is removed. + RegisterRBACHTTPFilterForTesting func() + + // UnregisterRBACHTTPFilterForTesting unregisters the RBAC HTTP Filter for + // testing purposes. This is needed because there is no way to unregister the + // HTTP Filter after registering it solely for testing purposes using + // RegisterRBACHTTPFilterForTesting(). + // + // TODO: Remove this function once the RBAC env var is removed. + UnregisterRBACHTTPFilterForTesting func() + + // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" +) + +// HealthChecker defines the signature of the client-side LB channel health checking function. +// +// The implementation is expected to create a health checking RPC stream by +// calling newStream(), watch for the health status of serviceName, and report +// it's health back by calling setConnectivityState(). +// +// The health checking protocol is defined at: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error + +const ( + // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. + CredsBundleModeFallback = "fallback" + // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer + // mode. + CredsBundleModeBalancer = "balancer" + // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode + // that supports backend returned by grpclb balancer. + CredsBundleModeBackendFromBalancer = "backend-from-balancer" +) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go new file mode 100644 index 0000000000..900bfb7160 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -0,0 +1,132 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata contains functions to set and get metadata from addresses. +// +// This package is experimental. 
+package metadata
+
+import (
+	"fmt"
+	"strings"
+
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+type mdKeyType string
+
+const mdKey = mdKeyType("grpc.internal.address.metadata")
+
+type mdValue metadata.MD
+
+func (m mdValue) Equal(o any) bool {
+	om, ok := o.(mdValue)
+	if !ok {
+		return false
+	}
+	if len(m) != len(om) {
+		return false
+	}
+	for k, v := range m {
+		ov := om[k]
+		if len(ov) != len(v) {
+			return false
+		}
+		for i, ve := range v {
+			if ov[i] != ve {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Get returns the metadata of addr.
+func Get(addr resolver.Address) metadata.MD {
+	attrs := addr.Attributes
+	if attrs == nil {
+		return nil
+	}
+	md, _ := attrs.Value(mdKey).(mdValue)
+	return metadata.MD(md)
+}
+
+// Set sets (overrides) the metadata in addr.
+//
+// When a SubConn is created with this address, the RPCs sent on it will all
+// have this metadata.
+func Set(addr resolver.Address, md metadata.MD) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
+	return addr
+}
+
+// Validate validates every pair in md with ValidatePair.
+func Validate(md metadata.MD) error {
+	for k, vals := range md {
+		if err := ValidatePair(k, vals...); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// hasNotPrintable returns true if msg contains any characters which are not in %x20-%x7E.
+func hasNotPrintable(msg string) bool {
+	// Iterate by byte index to save the rune conversion a range loop would do.
+	for i := 0; i < len(msg); i++ {
+		if msg[i] < 0x20 || msg[i] > 0x7E {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidatePair validates a key-value pair with the following rules (pseudo-headers are skipped):
+//
+//   - key must contain one or more characters.
+//   - the characters in the key must be contained in [0-9 a-z _ - .].
+//   - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed.
+//   - the characters in every value must be printable (in [%x20-%x7E]).
+func ValidatePair(key string, vals ...string) error {
+	// key should not be empty
+	if key == "" {
+		return fmt.Errorf("there is an empty key in the header")
+	}
+	// pseudo-headers will be ignored
+	if key[0] == ':' {
+		return nil
+	}
+	// Check the key; iterate by byte index to avoid a rune conversion.
+	for i := 0; i < len(key); i++ {
+		r := key[i]
+		if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+			return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
+		}
+	}
+	if strings.HasSuffix(key, "-bin") {
+		return nil
+	}
+	// check value
+	for _, val := range vals {
+		if hasNotPrintable(val) {
+			return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
+		}
+	}
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
new file mode 100644
index 0000000000..7033191375
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -0,0 +1,82 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
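A few worked examples of the validation rules above, as a hypothetical in-package function with expected results in comments:

package metadata

import "fmt"

// exampleValidatePair is a hypothetical in-package illustration.
func exampleValidatePair() {
	fmt.Println(ValidatePair("user-agent", "grpc-go/1.x")) // <nil>
	fmt.Println(ValidatePair("User-Agent", "x"))           // error: uppercase is not in [0-9a-z-_.]
	fmt.Println(ValidatePair(":authority", "\x01"))        // <nil>: pseudo-headers are skipped
	fmt.Println(ValidatePair("trace-bin", "\x00\x01"))     // <nil>: "-bin" values are exempt
	fmt.Println(ValidatePair("note", "caf\u00e9"))         // error: value byte outside %x20-%x7E
}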
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. +package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e any) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. +func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go new file mode 100644 index 0000000000..f0603871c9 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -0,0 +1,167 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver provides internal resolver-related functionality. +package resolver + +import ( + "context" + "sync" + + "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// ConfigSelector controls what configuration to use for every RPC. +type ConfigSelector interface { + // Selects the configuration for the RPC, or terminates it using the error. 
+ // This error will be converted by the gRPC library to a status error with + // code UNKNOWN if it is not returned as a status error. + SelectConfig(RPCInfo) (*RPCConfig, error) +} + +// RPCInfo contains RPC information needed by a ConfigSelector. +type RPCInfo struct { + // Context is the user's context for the RPC and contains headers and + // application timeout. It is passed for interception purposes and for + // efficiency reasons. SelectConfig should not be blocking. + Context context.Context + Method string // i.e. "/Service/Method" +} + +// RPCConfig describes the configuration to use for each RPC. +type RPCConfig struct { + // The context to use for the remainder of the RPC; can pass info to LB + // policy or affect timeout or metadata. + Context context.Context + MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC + OnCommitted func() // Called when the RPC has been committed (retries no longer possible) + Interceptor ClientInterceptor +} + +// ClientStream is the same as grpc.ClientStream, but defined here for circular +// dependency reasons. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. + CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m any) error +} + +// ClientInterceptor is an interceptor for gRPC client streams. 
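A small sketch of the pretty.ToJSON helper from the file above, as a hypothetical in-package function: proto messages take the jsonpb/protojson paths, plain Go values fall back to encoding/json, and any marshal failure degrades to fmt's %+v formatting.

package pretty

import "fmt"

// exampleToJSON is a hypothetical in-package illustration.
func exampleToJSON() {
	cfg := struct {
		Policy  string
		Retries int
	}{Policy: "pick_first", Retries: 3}

	fmt.Println(ToJSON(cfg))
	// {
	//   "Policy": "pick_first",
	//   "Retries": 3
	// }
}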
+type ClientInterceptor interface { + // NewStream produces a ClientStream for an RPC which may optionally use + // the provided function to produce a stream for delegation. Note: + // RPCInfo.Context should not be used (will be nil). + // + // done is invoked when the RPC is finished using its connection, or could + // not be assigned a connection. RPC operations may still occur on + // ClientStream after done is called, since the interceptor is invoked by + // application-layer operations. done must never be nil when called. + NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error) +} + +// ServerInterceptor is an interceptor for incoming RPC's on gRPC server side. +type ServerInterceptor interface { + // AllowRPC checks if an incoming RPC is allowed to proceed based on + // information about connection RPC was received on, and HTTP Headers. This + // information will be piped into context. + AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting. +} + +type csKeyType string + +const csKey = csKeyType("grpc.internal.resolver.configSelector") + +// SetConfigSelector sets the config selector in state and returns the new +// state. +func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State { + state.Attributes = state.Attributes.WithValue(csKey, cs) + return state +} + +// GetConfigSelector retrieves the config selector from state, if present, and +// returns it or nil if absent. +func GetConfigSelector(state resolver.State) ConfigSelector { + cs, _ := state.Attributes.Value(csKey).(ConfigSelector) + return cs +} + +// SafeConfigSelector allows for safe switching of ConfigSelector +// implementations such that previous values are guaranteed to not be in use +// when UpdateConfigSelector returns. +type SafeConfigSelector struct { + mu sync.RWMutex + cs ConfigSelector +} + +// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until +// all uses of the previous ConfigSelector have completed. +func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) { + scs.mu.Lock() + defer scs.mu.Unlock() + scs.cs = cs +} + +// SelectConfig defers to the current ConfigSelector in scs. +func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) { + scs.mu.RLock() + defer scs.mu.RUnlock() + return scs.cs.SelectConfig(r) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go new file mode 100644 index 0000000000..99e1e5b36c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -0,0 +1,470 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. 
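The read-write mutex above is what gives SafeConfigSelector its drain guarantee: each SelectConfig call holds the read lock for its full duration, so UpdateConfigSelector cannot take the write lock (and return) until every in-flight selection has finished. A sketch of the swap pattern, reusing the illustrative selector from earlier:

var scs SafeConfigSelector
scs.UpdateConfigSelector(&fixedConfigSelector{}) // install the initial selector

// Per-RPC path: runs under the read lock.
cfg, err := scs.SelectConfig(RPCInfo{Method: "/Service/Method"})
_, _ = cfg, err

// Name-resolver update path: blocks until in-flight SelectConfig calls
// have returned, then swaps in the new selector.
scs.UpdateConfigSelector(&fixedConfigSelector{})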
+package dns + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + grpclbstate "google.golang.org/grpc/balancer/grpclb/state" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal/envconfig" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB +// addresses from SRV records. Must not be changed after init time. +var EnableSRVLookups = false + +var logger = grpclog.Component("dns") + +// Globals to stub out in tests. TODO: Perhaps these two can be combined into a +// single variable for testing the resolver? +var ( + newTimer = time.NewTimer + newTimerDNSResRate = time.NewTimer +) + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultDNSSvrPort = "53" + golang = "GO" + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. + txtPrefix = "_grpc_config." + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("dns resolver: missing address") + + // Addresses ending with a colon that is supposed to be the separator + // between host and port is not allowed. E.g. "::" is a valid address as + // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with + // a colon as the host and port separator + errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +var ( + defaultResolver netResolver = net.DefaultResolver + // To prevent excessive re-resolution, we enforce a rate limit on DNS + // resolution requests. + minDNSResRate = 30 * time.Second +) + +var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { + var dialer net.Dialer + return dialer.DialContext(ctx, network, address) + } +} + +var newNetResolver = func(authority string) (netResolver, error) { + host, port, err := parseTarget(authority, defaultDNSSvrPort) + if err != nil { + return nil, err + } + + authorityWithPort := net.JoinHostPort(host, port) + + return &net.Resolver{ + PreferGo: true, + Dial: addressDialer(authorityWithPort), + }, nil +} + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{} +} + +type dnsBuilder struct{} + +// Build creates and starts a DNS resolver that watches the name resolution of +// the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint(), defaultPort) + if err != nil { + return nil, err + } + + // IP address. + if ipAddr, ok := formatIP(host); ok { + addr := []resolver.Address{{Addr: ipAddr + ":" + port}} + cc.UpdateState(resolver.State{Addresses: addr}) + return deadResolver{}, nil + } + + // DNS address (non-IP). 
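From the caller's perspective this builder is reached through an ordinary dial target. A sketch of both authority forms; the host names are placeholders, and errors are handled minimally:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Default system resolver: scheme "dns", empty authority.
	conn, err := grpc.Dial("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Pinned DNS server: a non-empty authority such as "8.8.8.8:53" is
	// routed through newNetResolver (port defaults to 53 if omitted).
	conn2, err := grpc.Dial("dns://8.8.8.8:53/example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn2.Close()
}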
+ ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + rn: make(chan struct{}, 1), + disableServiceConfig: opts.DisableServiceConfig, + } + + if target.URL.Host == "" { + d.resolver = defaultResolver + } else { + d.resolver, err = newNetResolver(target.URL.Host) + if err != nil { + return nil, err + } + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +type netResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +// deadResolver is a resolver that does nothing. +type deadResolver struct{} + +func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (deadResolver) Close() {} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + host string + port string + resolver netResolver + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the + // target. + rn chan struct{} + // wg is used to enforce Close() to return after the watcher() goroutine has + // finished. Otherwise, data race will be possible. [Race Example] in + // dns_resolver_test we replace the real lookup functions with mocked ones to + // facilitate testing. If Close() doesn't wait for watcher() goroutine + // finishes, race detector sometimes will warns lookup (READ the lookup + // function pointers) inside watcher() goroutine has data race with + // replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + disableServiceConfig bool +} + +// ResolveNow invoke an immediate resolution of the target that this +// dnsResolver watches. +func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + backoffIndex := 1 + for { + state, err := d.lookup() + if err != nil { + // Report error to the underlying grpc.ClientConn. + d.cc.ReportError(err) + } else { + err = d.cc.UpdateState(*state) + } + + var timer *time.Timer + if err == nil { + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. + backoffIndex = 1 + timer = newTimerDNSResRate(minDNSResRate) + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-d.rn: + } + } else { + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. 
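The rn channel above is a standard coalescing trick: because the buffer holds exactly one element and the send is non-blocking, any burst of ResolveNow calls collapses into a single pending wakeup for the watcher. The pattern in isolation, as a self-contained sketch:

package main

import "fmt"

func main() {
	rn := make(chan struct{}, 1)

	// Producer side (ResolveNow): never blocks.
	notify := func() {
		select {
		case rn <- struct{}{}: // first notification wins
		default: // one is already queued; coalesce
		}
	}

	notify()
	notify()
	<-rn // a burst of notifications yields a single wakeup
	fmt.Println("resolved once for two ResolveNow calls")
}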
+ timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + backoffIndex++ + } + select { + case <-d.ctx.Done(): + timer.Stop() + return + case <-timer.C: + } + } +} + +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { + if !EnableSRVLookups { + return nil, nil + } + var newAddrs []resolver.Address + _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + err = handleDNSError(err, "SRV") // may become nil + return nil, err + } + for _, s := range srvs { + lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + if err != nil { + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err + } + for _, a := range lbAddrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target}) + } + } + return newAddrs, nil +} + +func handleDNSError(err error, lookupType string) error { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + // Timeouts and temporary errors should be communicated to gRPC to + // attempt another DNS query (with backoff). Other errors should be + // suppressed (they may represent the absence of a TXT record). + return nil + } + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + logger.Info(err) + } + return err +} + +func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) + if err != nil { + if envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { + return &serviceconfig.ParseResult{Err: err} + } + return nil + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. + if !strings.HasPrefix(res, txtAttribute) { + logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) + // This is not an error; it is the equivalent of not having a service + // config. + return nil + } + sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) + return d.cc.ParseServiceConfig(sc) +} + +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { + addrs, err := d.resolver.LookupHost(d.ctx, d.host) + if err != nil { + err = handleDNSError(err, "A") + return nil, err + } + newAddrs := make([]resolver.Address, 0, len(addrs)) + for _, a := range addrs { + ip, ok := formatIP(a) + if !ok { + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) + } + addr := ip + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs, nil +} + +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } + + state := resolver.State{Addresses: addrs} + if len(srv) > 0 { + state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) + } + if !d.disableServiceConfig { + state.ServiceConfig = d.lookupTXT() + } + return &state, nil +} + +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. 
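Putting lookupTXT and canaryingSC together: the resolver queries _grpc_config.<host>, concatenates the strings of the TXT record, strips the grpc_config= attribute, and hands the remaining JSON to the canary-selection logic below. An illustrative record; all values are invented:

// DNS zone entry consumed by lookupTXT for host "example.com":
//
//	_grpc_config.example.com. 300 IN TXT "grpc_config=[...]"
//
// After the grpc_config= prefix is stripped, canaryingSC parses a list of
// choices such as:
const exampleChoices = `[
  {"clientLanguage": ["GO"], "percentage": 10,
   "serviceConfig": {"loadBalancingConfig": [{"round_robin": {}}]}},
  {"serviceConfig": {}}
]`

// The first choice applies to roughly 10% of Go clients; the final choice,
// with no predicates, is the fallback for everyone else, since a nil
// clientLanguage, percentage, or clientHostName matches anything.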
+// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. +// examples: +// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443" +// target: ":80" defaultPort: "443" returns host: "localhost", port: "80" +func parseTarget(target, defaultPort string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + if port == "" { + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. + return "", "", errEndsWithColon + } + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. + host = "localhost" + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return grpcrand.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + logger.Warningf("dns: error parsing service config json: %v", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + logger.Warningf("dns: error getting client hostname: %v", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go new file mode 100644 index 0000000000..afac56572a --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. +package passthrough + +import ( + "errors" + + "google.golang.org/grpc/resolver" +) + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go new file mode 100644 index 0000000000..1609116877 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -0,0 +1,74 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package unix implements a resolver for unix targets. +package unix + +import ( + "fmt" + + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" +) + +const unixScheme = "unix" +const unixAbstractScheme = "unix-abstract" + +type builder struct { + scheme string +} + +func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) + } + + // gRPC was parsing the dial target manually before PR #4817, and we + // switched to using url.Parse() in that PR. To avoid breaking existing + // resolver implementations we ended up stripping the leading "/" from the + // endpoint. This obviously does not work for the "unix" scheme. Hence we + // end up using the parsed URL instead. 
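For comparison, the dial-target shapes handled by the passthrough resolver above and by this unix resolver; addresses are placeholders and dial errors are elided, which is safe here only because grpc.Dial does not connect eagerly:

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	creds := grpc.WithTransportCredentials(insecure.NewCredentials())

	// passthrough: the endpoint is handed to the transport verbatim,
	// with no name resolution at all.
	cc1, _ := grpc.Dial("passthrough:///10.0.0.1:50051", creds)
	defer cc1.Close()

	// unix: filesystem socket; the endpoint is the parsed URL path.
	cc2, _ := grpc.Dial("unix:///var/run/app.sock", creds)
	defer cc2.Close()

	// unix-abstract: "@" is prepended to the address, which is how Go
	// spells the leading NUL byte of the abstract socket namespace.
	cc3, _ := grpc.Dial("unix-abstract:app-namespace", creds)
	defer cc3.Close()
}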
+ endpoint := target.URL.Path + if endpoint == "" { + endpoint = target.URL.Opaque + } + addr := resolver.Address{Addr: endpoint} + if b.scheme == unixAbstractScheme { + // We can not prepend \0 as c++ gRPC does, as in Golang '@' is used to signify we do + // not want trailing \0 in address. + addr.Addr = "@" + addr.Addr + } + cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}}) + return &nopResolver{}, nil +} + +func (b *builder) Scheme() string { + return b.scheme +} + +type nopResolver struct { +} + +func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (*nopResolver) Close() {} + +func init() { + resolver.Register(&builder{scheme: unixScheme}) + resolver.Register(&builder{scheme: unixAbstractScheme}) +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 0000000000..11d82afcc7 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. 
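A round-trip sketch for the Duration type above (its UnmarshalJSON half continues below). The wire form is the protobuf JSON duration: decimal seconds with 0, 3, 6, or 9 fractional digits and a trailing "s". This again assumes code inside the grpc module, as the package is internal:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"google.golang.org/grpc/internal/serviceconfig"
)

func main() {
	d := serviceconfig.Duration(1500 * time.Millisecond)

	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // "1.500s": trailing zero groups are trimmed

	var back serviceconfig.Duration
	_ = json.Unmarshal([]byte(`"0.000000001s"`), &back)
	fmt.Println(time.Duration(back)) // 1ns
}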
+ hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. + const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go new file mode 100644 index 0000000000..51e733e495 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -0,0 +1,180 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig contains utility functions to parse service config. +package serviceconfig + +import ( + "encoding/json" + "fmt" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + externalserviceconfig "google.golang.org/grpc/serviceconfig" +) + +var logger = grpclog.Component("core") + +// BalancerConfig wraps the name and config associated with one load balancing +// policy. It corresponds to a single entry of the loadBalancingConfig field +// from ServiceConfig. +// +// It implements the json.Unmarshaler interface. +// +// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247 +type BalancerConfig struct { + Name string + Config externalserviceconfig.LoadBalancingConfig +} + +type intermediateBalancerConfig []map[string]json.RawMessage + +// MarshalJSON implements the json.Marshaler interface. +// +// It marshals the balancer and config into a length-1 slice +// ([]map[string]config). 
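For the UnmarshalJSON method that follows, an illustrative input shows the selection rule: unregistered policies are skipped silently, and the first registered one wins.

// Illustrative input to BalancerConfig.UnmarshalJSON:
const lbCfg = `[
  {"made_up_policy": {}},
  {"round_robin": {}}
]`

// "made_up_policy" is presumably not registered, so it is skipped without
// error. "round_robin" is registered (by the roundrobin package), so
// bc.Name becomes "round_robin"; its builder does not implement
// balancer.ConfigParser, and since the config is the empty object no
// warning is logged and bc.Config stays nil.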
+func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { + if bc.Config == nil { + // If config is nil, return empty config `{}`. + return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil + } + c, err := json.Marshal(bc.Config) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// +// ServiceConfig contains a list of loadBalancingConfigs, each with a name and +// config. This method iterates through that list in order, and stops at the +// first policy that is supported. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. +func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { + var ir intermediateBalancerConfig + err := json.Unmarshal(b, &ir) + if err != nil { + return err + } + + var names []string + for i, lbcfg := range ir { + if len(lbcfg) != 1 { + return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg) + } + + var ( + name string + jsonCfg json.RawMessage + ) + // Get the key:value pair from the map. We have already made sure that + // the map contains a single entry. + for name, jsonCfg = range lbcfg { + } + + names = append(names, name) + builder := balancer.Get(name) + if builder == nil { + // If the balancer is not registered, move on to the next config. + // This is not an error. + continue + } + bc.Name = name + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + if string(jsonCfg) != "{}" { + logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg)) + } + // Stop at this, though the builder doesn't support parsing config. + return nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err) + } + bc.Config = cfg + return nil + } + // This is reached when the for loop iterates over all entries, but didn't + // return. This means we had a loadBalancingConfig slice but did not + // encounter a registered policy. The config is considered invalid in this + // case. + return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names) +} + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. 
If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int + // RetryPolicy configures retry options for the method. + RetryPolicy *RetryPolicy +} + +// RetryPolicy defines the go-native version of the retry policy defined by the +// service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type RetryPolicy struct { + // MaxAttempts is the maximum number of attempts, including the original RPC. + // + // This field is required and must be two or greater. + MaxAttempts int + + // Exponential backoff parameters. The initial retry attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at + // random(0, + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). + // + // These fields are required and must be greater than zero. + InitialBackoff time.Duration + MaxBackoff time.Duration + BackoffMultiplier float64 + + // The set of status codes which may be retried. + // + // Status codes are specified as strings, e.g., "UNAVAILABLE". + // + // This field is required and must be non-empty. + // Note: a set is used to store this for easy lookup. + RetryableStatusCodes map[codes.Code]bool +} diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go new file mode 100644 index 0000000000..4cf85cad9f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -0,0 +1,176 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). 
+func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// Err returns an error representing c and msg. If c is OK, returns nil. +func Err(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...any) error { + return Err(c, fmt.Sprintf(format, a...)) +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. +func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return &Error{s: s} +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []any { + if s == nil || s.s == nil { + return nil + } + details := make([]any, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +func (s *Status) String() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message()) +} + +// Error wraps a pointer of a status proto. It implements error and Status, +// and a nil *Error should never be returned by this package. +type Error struct { + s *Status +} + +func (e *Error) Error() string { + return e.s.String() +} + +// GRPCStatus returns the Status represented by se. +func (e *Error) GRPCStatus() *Status { + return e.s +} + +// Is implements future error.Is functionality. +// A Error is equivalent if the code and message are identical. +func (e *Error) Is(target error) bool { + tse, ok := target.(*Error) + if !ok { + return false + } + return proto.Equal(e.s.s, tse.s.s) +} + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. 
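Tying this file together, a sketch of creating, enriching, and recovering a status. It uses the public status package, which aliases the Status type defined here; the errdetails message is just one common choice of detail payload:

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.NotFound, "user not found")

	// Attach a typed detail; it is packed into an Any on the status proto.
	st, err := st.WithDetails(&errdetails.ResourceInfo{ResourceName: "users/42"})
	if err != nil {
		panic(err)
	}

	// Err materializes the *Error wrapper; codes.OK would yield nil.
	e := st.Err()
	fmt.Println(e) // rpc error: code = NotFound desc = user not found

	// Round-trip back from the error and unpack the details.
	for _, d := range status.Convert(e).Details() {
		fmt.Printf("%T\n", d)
	}
}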
+func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go new file mode 100644 index 0000000000..b3a72276de --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go @@ -0,0 +1,112 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level operating system +// stats/info. +package syscall + +import ( + "fmt" + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" + "google.golang.org/grpc/grpclog" +) + +var logger = grpclog.Component("core") + +// GetCPUTime returns the how much CPU time has passed since the start of this process. +func GetCPUTime() int64 { + var ts unix.Timespec + if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil { + logger.Fatal(err) + } + return ts.Nano() +} + +// Rusage is an alias for syscall.Rusage under linux environment. +type Rusage = syscall.Rusage + +// GetRusage returns the resource usage of current process. +func GetRusage() *Rusage { + rusage := new(Rusage) + syscall.Getrusage(syscall.RUSAGE_SELF, rusage) + return rusage +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + var ( + utimeDiffs = latest.Utime.Sec - first.Utime.Sec + utimeDiffus = latest.Utime.Usec - first.Utime.Usec + stimeDiffs = latest.Stime.Sec - first.Stime.Sec + stimeDiffus = latest.Stime.Usec - first.Stime.Usec + ) + + uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6 + sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6 + + return uTimeElapsed, sTimeElapsed +} + +// SetTCPUserTimeout sets the TCP user timeout on a connection's socket +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + // not a TCP connection. exit early + return nil + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + return fmt.Errorf("error getting raw connection: %v", err) + } + err = rawConn.Control(func(fd uintptr) { + err = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int(timeout/time.Millisecond)) + }) + if err != nil { + return fmt.Errorf("error setting option on socket: %v", err) + } + + return nil +} + +// GetTCPUserTimeout gets the TCP user timeout on a connection's socket +func GetTCPUserTimeout(conn net.Conn) (opt int, err error) { + tcpconn, ok := conn.(*net.TCPConn) + if !ok { + err = fmt.Errorf("conn is not *net.TCPConn. 
got %T", conn) + return + } + rawConn, err := tcpconn.SyscallConn() + if err != nil { + err = fmt.Errorf("error getting raw connection: %v", err) + return + } + err = rawConn.Control(func(fd uintptr) { + opt, err = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT) + }) + if err != nil { + err = fmt.Errorf("error getting option on socket: %v", err) + return + } + + return +} diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go new file mode 100644 index 0000000000..999f52cd75 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go @@ -0,0 +1,77 @@ +//go:build !linux +// +build !linux + +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package syscall provides functionalities that grpc uses to get low-level +// operating system stats/info. +package syscall + +import ( + "net" + "sync" + "time" + + "google.golang.org/grpc/grpclog" +) + +var once sync.Once +var logger = grpclog.Component("core") + +func log() { + once.Do(func() { + logger.Info("CPU time info is unavailable on non-linux environments.") + }) +} + +// GetCPUTime returns the how much CPU time has passed since the start of this +// process. It always returns 0 under non-linux environments. +func GetCPUTime() int64 { + log() + return 0 +} + +// Rusage is an empty struct under non-linux environments. +type Rusage struct{} + +// GetRusage is a no-op function under non-linux environments. +func GetRusage() *Rusage { + log() + return nil +} + +// CPUTimeDiff returns the differences of user CPU time and system CPU time used +// between two Rusage structs. It a no-op function for non-linux environments. +func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) { + log() + return 0, 0 +} + +// SetTCPUserTimeout is a no-op function under non-linux environments. +func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error { + log() + return nil +} + +// GetTCPUserTimeout is a no-op function under non-linux environments. +// A negative return value indicates the operation is not supported +func GetTCPUserTimeout(conn net.Conn) (int, error) { + log() + return -1, nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go new file mode 100644 index 0000000000..070680edba --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go @@ -0,0 +1,141 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
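A usage sketch for the TCP_USER_TIMEOUT helpers above: on Linux the socket option is set and read back in milliseconds, while on other platforms the same calls are the no-ops just defined. The address is a placeholder, and the import assumes code inside the grpc module:

package main

import (
	"log"
	"net"
	"time"

	isyscall "google.golang.org/grpc/internal/syscall"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:443")
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// With a 20s TCP_USER_TIMEOUT, unacked data may abort the connection
	// after 20 seconds instead of the much longer kernel default.
	if err := isyscall.SetTCPUserTimeout(conn, 20*time.Second); err != nil {
		log.Fatalf("set: %v", err)
	}

	if v, err := isyscall.GetTCPUserTimeout(conn); err == nil {
		log.Printf("TCP_USER_TIMEOUT = %d ms", v) // 20000 on Linux, -1 elsewhere
	}
}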
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync"
+	"time"
+)
+
+const (
+	// bdpLimit is the maximum value the flow control windows will be increased
+	// to. TCP typically limits this to 4MB, but some systems go up to 16MB.
+	// Since this is only a limit, it is safe to make it optimistic.
+	bdpLimit = (1 << 20) * 16
+	// alpha is a constant factor used to keep a moving average
+	// of RTTs.
+	alpha = 0.9
+	// If the current bdp sample is greater than or equal to
+	// our beta * our estimated bdp and the current bandwidth
+	// sample is the maximum bandwidth observed so far, we
+	// increase our bdp estimate by a factor of gamma.
+	beta = 0.66
+	// To keep our bdp estimate smaller than or equal to twice the real BDP,
+	// we should multiply the current sample by 4/3; however, to round things
+	// out, we use 2 as the multiplication factor.
+	gamma = 2
+)
+
+// Adding arbitrary data to ping so that its ack can be identified.
+// Easter-egg: what does the ping message say?
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}}
+
+type bdpEstimator struct {
+	// sentAt is the time when the ping was sent.
+	sentAt time.Time
+
+	mu sync.Mutex
+	// bdp is the current bdp estimate.
+	bdp uint32
+	// sample is the number of bytes received in one measurement cycle.
+	sample uint32
+	// bwMax is the maximum bandwidth noted so far (bytes/sec).
+	bwMax float64
+	// isSent tracks whether a bdp ping is in flight, i.e. the beginning of a
+	// new measurement cycle.
+	isSent bool
+	// updateFlowControl is the callback used to update the window sizes.
+	updateFlowControl func(n uint32)
+	// sampleCount is the number of samples taken so far.
+	sampleCount uint64
+	// rtt is the smoothed round trip time (seconds).
+	rtt float64
+}
+
+// timesnap registers the time the bdp ping was sent out so that the network
+// rtt can be calculated when its ack is received. It is called (by the
+// controller) when the bdpPing is being written on the wire.
+func (b *bdpEstimator) timesnap(d [8]byte) {
+	if bdpPing.data != d {
+		return
+	}
+	b.sentAt = time.Now()
+}
+
+// add adds bytes to the current sample for calculating bdp.
+// It returns true only if a ping must be sent. This can be used
+// by the caller (handleData) to make a decision about batching
+// a window update with it.
+func (b *bdpEstimator) add(n uint32) bool {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.bdp == bdpLimit {
+		return false
+	}
+	if !b.isSent {
+		b.isSent = true
+		b.sample = n
+		b.sentAt = time.Time{}
+		b.sampleCount++
+		return true
+	}
+	b.sample += n
+	return false
+}
+
+// calculate is called when an ack for a bdp ping is received.
+// Here we calculate the current bdp and bandwidth sample and
+// decide if the flow control windows should go up.
+func (b *bdpEstimator) calculate(d [8]byte) {
+	// Check if the ping acked for was the bdp ping.
+	if bdpPing.data != d {
+		return
+	}
+	b.mu.Lock()
+	rttSample := time.Since(b.sentAt).Seconds()
+	if b.sampleCount < 10 {
+		// Bootstrap rtt with an average of the first 10 rtt samples.
+		b.rtt += (rttSample - b.rtt) / float64(b.sampleCount)
+	} else {
+		// Weight the recent past more heavily.
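A worked instance of the update rule in calculate, with invented numbers, to make the constants concrete:

package main

import "fmt"

func main() {
	const beta, gamma = 0.66, 2.0 // constants from the estimator above

	bdp := 512.0 * 1024    // current estimate in bytes (invented)
	sample := 600.0 * 1024 // bytes seen between ping and ack (invented)
	rtt := 0.040           // smoothed round-trip time, seconds (invented)

	// The sample undercounts the true BDP by up to 1.5x, hence the divisor.
	bwCurrent := sample / (rtt * 1.5) // 10,240,000 bytes/sec

	// Grow only when this is the fastest bandwidth seen so far AND the
	// sample is a sizable fraction (beta) of the current estimate.
	if sample >= beta*bdp {
		bdp = gamma * sample // 1,228,800 bytes, capped at bdpLimit (16 MB)
	}
	fmt.Printf("bw=%.0f B/s, new bdp=%.0f bytes\n", bwCurrent, bdp)
}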
+ b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. + if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go new file mode 100644 index 0000000000..b330ccedc8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -0,0 +1,1007 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "errors" + "fmt" + "net" + "runtime" + "strconv" + "sync" + "sync/atomic" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/status" +) + +var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { + e.SetMaxDynamicTableSizeLimit(v) +} + +type itemNode struct { + it any + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i any) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() any { + return il.head.it +} + +func (il *itemList) dequeue() any { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +// maxQueuedTransportResponseFrames is the most queued "transport response" +// frames we will buffer before preventing new reads from occurring on the +// transport. These are control frames sent in response to client requests, +// such as RST_STREAM due to bad headers or settings acks. 
+const maxQueuedTransportResponseFrames = 50 + +type cbItem interface { + isTransportResponseFrame() bool +} + +// registerStream is used to register an incoming stream with loopy writer. +type registerStream struct { + streamID uint32 + wq *writeQuota +} + +func (*registerStream) isTransportResponseFrame() bool { return false } + +// headerFrame is also used to register stream on the client-side. +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) error // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +func (h *headerFrame) isTransportResponseFrame() bool { + return h.cleanup != nil && h.cleanup.rst // Results in a RST_STREAM +} + +type cleanupStream struct { + streamID uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM + +type earlyAbortStream struct { + httpStatus uint32 + streamID uint32 + contentSubtype string + status *status.Status + rst bool +} + +func (*earlyAbortStream) isTransportResponseFrame() bool { return false } + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. + onEachWrite func() +} + +func (*dataFrame) isTransportResponseFrame() bool { return false } + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*incomingWindowUpdate) isTransportResponseFrame() bool { return false } + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +func (*outgoingWindowUpdate) isTransportResponseFrame() bool { + return false // window updates are throttled by thresholds +} + +type incomingSettings struct { + ss []http2.Setting +} + +func (*incomingSettings) isTransportResponseFrame() bool { return true } // Results in a settings ACK + +type outgoingSettings struct { + ss []http2.Setting +} + +func (*outgoingSettings) isTransportResponseFrame() bool { return false } + +type incomingGoAway struct { +} + +func (*incomingGoAway) isTransportResponseFrame() bool { return false } + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure +} + +func (*goAway) isTransportResponseFrame() bool { return false } + +type ping struct { + ack bool + data [8]byte +} + +func (*ping) isTransportResponseFrame() bool { return true } + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } + +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. 
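The counter and trfChan fields defined below implement a simple gate: executeAndPut shuts it when the fiftieth transport-response frame is queued, the reader blocks in throttle, and get reopens it once the queue drains back under the threshold. The mechanism in isolation, as a simplified sketch:

package main

import "sync/atomic"

// gate is a simplified version of the trfChan mechanism: producers stall
// in wait while the gate is shut; the consumer reopens it when the queue
// of transport-response frames drains below the threshold.
type gate struct {
	ch atomic.Value // holds a chan struct{}; nil means open
}

func (g *gate) wait() {
	if ch, _ := g.ch.Load().(chan struct{}); ch != nil {
		<-ch // blocks until reopen closes the channel
	}
}

func (g *gate) shut() { g.ch.Store(make(chan struct{})) }

func (g *gate) reopen() {
	if ch, _ := g.ch.Load().(chan struct{}); ch != nil {
		close(ch)
		g.ch.Store((chan struct{})(nil))
	}
}

func main() {
	var g gate
	g.shut()
	go g.reopen()
	g.wait() // returns once the consumer reopens the gate
}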
+type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. + // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +// controlBuffer is a way to pass information to loopy. +// Information is passed as specific struct types called control frames. +// A control frame not only represents data, messages or headers to be sent out +// but can also be used to instruct loopy to update its internal state. +// It shouldn't be confused with an HTTP2 frame, although some of the control frames +// like dataFrame and headerFrame do go out on wire as HTTP2 frames. +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error + + // transportResponseFrames counts the number of queued items that represent + // the response of an action initiated by the peer. trfChan is created + // when transportResponseFrames >= maxQueuedTransportResponseFrames and is + // closed and nilled when transportResponseFrames drops below the + // threshold. Both fields are protected by mu. + transportResponseFrames int + trfChan atomic.Value // chan struct{} +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +// throttle blocks if there are too many incomingSettings/cleanupStreams in the +// controlbuf. +func (c *controlBuffer) throttle() { + ch, _ := c.trfChan.Load().(chan struct{}) + if ch != nil { + select { + case <-ch: + case <-c.done: + } + } +} + +func (c *controlBuffer) put(it cbItem) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + if it.isTransportResponseFrame() { + c.transportResponseFrames++ + if c.transportResponseFrames == maxQueuedTransportResponseFrames { + // We are adding the frame that puts us over the threshold; create + // a throttling channel. 
+			c.trfChan.Store(make(chan struct{}))
+		}
+	}
+	c.mu.Unlock()
+	if wakeUp {
+		select {
+		case c.ch <- struct{}{}:
+		default:
+		}
+	}
+	return true, nil
+}
+
+// Note: argument f should never be nil.
+func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return false, c.err
+	}
+	if !f(it) { // f wasn't successful
+		c.mu.Unlock()
+		return false, nil
+	}
+	c.mu.Unlock()
+	return true, nil
+}
+
+func (c *controlBuffer) get(block bool) (any, error) {
+	for {
+		c.mu.Lock()
+		if c.err != nil {
+			c.mu.Unlock()
+			return nil, c.err
+		}
+		if !c.list.isEmpty() {
+			h := c.list.dequeue().(cbItem)
+			if h.isTransportResponseFrame() {
+				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+					// We are removing the frame that put us over the
+					// threshold; close and clear the throttling channel.
+					ch := c.trfChan.Load().(chan struct{})
+					close(ch)
+					c.trfChan.Store((chan struct{})(nil))
+				}
+				c.transportResponseFrames--
+			}
+			c.mu.Unlock()
+			return h, nil
+		}
+		if !block {
+			c.mu.Unlock()
+			return nil, nil
+		}
+		c.consumerWaiting = true
+		c.mu.Unlock()
+		select {
+		case <-c.ch:
+		case <-c.done:
+			return nil, errors.New("transport closed by client")
+		}
+	}
+}
+
+func (c *controlBuffer) finish() {
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return
+	}
+	c.err = ErrConnClosing
+	// There may be headers for streams in the control buffer.
+	// These streams need to be cleaned out since the transport
+	// is still not aware of these yet.
+	for head := c.list.dequeueAll(); head != nil; head = head.next {
+		hdr, ok := head.it.(*headerFrame)
+		if !ok {
+			continue
+		}
+		if hdr.onOrphaned != nil { // It will be nil on the server-side.
+			hdr.onOrphaned(ErrConnClosing)
+		}
+	}
+	// In case throttle() is currently in flight, it needs to be unblocked.
+	// Otherwise, the transport may not close, since the transport is closed by
+	// the reader encountering the connection error.
+	ch, _ := c.trfChan.Load().(chan struct{})
+	if ch != nil {
+		close(ch)
+	}
+	c.trfChan.Store((chan struct{})(nil))
+	c.mu.Unlock()
+}
+
+type side int
+
+const (
+	clientSide side = iota
+	serverSide
+)
+
+// Loopy receives frames from the control buffer.
+// Each frame is handled individually; most of the work done by loopy goes
+// into handling data frames. Loopy maintains a queue of active streams, and each
+// stream maintains a queue of data frames; as loopy receives data frames
+// they get added to the queue of the relevant stream.
+// Loopy goes over this list of active streams by processing one node every iteration,
+// thereby closely resembling round-robin scheduling over all streams. While
+// processing a stream, loopy writes out data bytes from this stream capped by the min
+// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
+type loopyWriter struct {
+	side      side
+	cbuf      *controlBuffer
+	sendQuota uint32
+	oiws      uint32 // outbound initial window size.
+	// estdStreams is a map of all established streams that are not cleaned up yet.
+	// On client-side, this is all streams whose headers were sent out.
+	// On server-side, this is all streams whose headers were received.
+	estdStreams map[uint32]*outStream // Established streams.
+	// activeStreams is a linked-list of all streams that have data to send and some
+	// stream-level flow control quota.
+	// Each of these streams internally has a list of data items (and perhaps
+	// trailers on the server-side) to be sent out.
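+	// Streams are serviced round-robin: dequeued from the head by processData,
+	// and re-enqueued at the tail while they still have data and quota.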
+	activeStreams *outStreamList
+	framer        *framer
+	hBuf          *bytes.Buffer  // The buffer for HPACK encoding.
+	hEnc          *hpack.Encoder // HPACK encoder.
+	bdpEst        *bdpEstimator
+	draining      bool
+	conn          net.Conn
+	logger        *grpclog.PrefixLogger
+
+	// Side-specific handlers
+	ssGoAwayHandler func(*goAway) (bool, error)
+}
+
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger) *loopyWriter {
+	var buf bytes.Buffer
+	l := &loopyWriter{
+		side:          s,
+		cbuf:          cbuf,
+		sendQuota:     defaultWindowSize,
+		oiws:          defaultWindowSize,
+		estdStreams:   make(map[uint32]*outStream),
+		activeStreams: newOutStreamList(),
+		framer:        fr,
+		hBuf:          &buf,
+		hEnc:          hpack.NewEncoder(&buf),
+		bdpEst:        bdpEst,
+		conn:          conn,
+		logger:        logger,
+	}
+	return l
+}
+
+const minBatchSize = 1000
+
+// run should be run in a separate goroutine.
+// It reads control frames from controlBuf and processes them by:
+// 1. Updating loopy's internal state, and/or
+// 2. Writing out HTTP2 frames on the wire.
+//
+// Loopy keeps all active streams with data to send in a linked-list.
+// All streams in the activeStreams linked-list must have both:
+// 1. Data to send, and
+// 2. Stream level flow control quota available.
+//
+// In each iteration of the run loop, other than processing the incoming control
+// frame, loopy calls processData, which processes one node from the
+// activeStreams linked-list. This results in writing of HTTP2 frames into an
+// underlying write buffer. When there are no more control frames to read from
+// controlBuf, loopy flushes the write buffer. As an optimization, to increase
+// the batch size for each flush, loopy yields the processor once, if the batch
+// size is too low, to give stream goroutines a chance to fill it up.
+//
+// Upon exiting, if the error causing the exit is not an I/O error, run()
+// flushes and closes the underlying connection. Otherwise, the connection is
+// left open to allow the I/O error to be encountered by the reader instead.
+func (l *loopyWriter) run() (err error) {
+	defer func() {
+		if l.logger.V(logLevel) {
+			l.logger.Infof("loopyWriter exiting with error: %v", err)
+		}
+		if !isIOError(err) {
+			l.framer.writer.Flush()
+			l.conn.Close()
+		}
+		l.cbuf.finish()
+	}()
+	for {
+		it, err := l.cbuf.get(true)
+		if err != nil {
+			return err
+		}
+		if err = l.handle(it); err != nil {
+			return err
+		}
+		if _, err = l.processData(); err != nil {
+			return err
+		}
+		gosched := true
+	hasdata:
+		for {
+			it, err := l.cbuf.get(false)
+			if err != nil {
+				return err
+			}
+			if it != nil {
+				if err = l.handle(it); err != nil {
+					return err
+				}
+				if _, err = l.processData(); err != nil {
+					return err
+				}
+				continue hasdata
+			}
+			isEmpty, err := l.processData()
+			if err != nil {
+				return err
+			}
+			if !isEmpty {
+				continue hasdata
+			}
+			if gosched {
+				gosched = false
+				if l.framer.writer.offset < minBatchSize {
+					runtime.Gosched()
+					continue hasdata
+				}
+			}
+			l.framer.writer.Flush()
+			break hasdata
+		}
+	}
+}
+
+func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
+	return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
+}
+
+func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
+	// If the update is for the connection-level window (stream ID 0), update
+	// the connection send quota.
+	if w.streamID == 0 {
+		l.sendQuota += w.increment
+		return
+	}
+	// Find the stream and update it.
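+	// A stream parked in waitingOnStreamQuota becomes schedulable again once
+	// the replenished stream-level quota is positive.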
+	if str, ok := l.estdStreams[w.streamID]; ok {
+		str.bytesOutStanding -= int(w.increment)
+		if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
+			str.state = active
+			l.activeStreams.enqueue(str)
+			return
+		}
+	}
+}
+
+func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
+	return l.framer.fr.WriteSettings(s.ss...)
+}
+
+func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
+	l.applySettings(s.ss)
+	return l.framer.fr.WriteSettingsAck()
+}
+
+func (l *loopyWriter) registerStreamHandler(h *registerStream) {
+	str := &outStream{
+		id:    h.streamID,
+		state: empty,
+		itl:   &itemList{},
+		wq:    h.wq,
+	}
+	l.estdStreams[h.streamID] = str
+}
+
+func (l *loopyWriter) headerHandler(h *headerFrame) error {
+	if l.side == serverSide {
+		str, ok := l.estdStreams[h.streamID]
+		if !ok {
+			if l.logger.V(logLevel) {
+				l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
+			}
+			return nil
+		}
+		// Case 1.A: Server is responding back with headers.
+		if !h.endStream {
+			return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
+		}
+		// else: Case 1.B: Server wants to close stream.
+
+		if str.state != empty { // either active or waiting on stream quota.
+			// add it to str's list of items.
+			str.itl.enqueue(h)
+			return nil
+		}
+		if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
+			return err
+		}
+		return l.cleanupStreamHandler(h.cleanup)
+	}
+	// Case 2: Client wants to originate stream.
+	str := &outStream{
+		id:    h.streamID,
+		state: empty,
+		itl:   &itemList{},
+		wq:    h.wq,
+	}
+	return l.originateStream(str, h)
+}
+
+func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
+	// l.draining is set when handling GoAway, in which case we want to avoid
+	// creating new streams.
+	if l.draining {
+		// TODO: provide a better error with the reason we are in draining.
+		hdr.onOrphaned(errStreamDrain)
+		return nil
+	}
+	if err := hdr.initStream(str.id); err != nil {
+		return err
+	}
+	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
+		return err
+	}
+	l.estdStreams[str.id] = str
+	return nil
+}
+
+func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
+	if onWrite != nil {
+		onWrite()
+	}
+	l.hBuf.Reset()
+	for _, f := range hf {
+		if err := l.hEnc.WriteField(f); err != nil {
+			if l.logger.V(logLevel) {
+				l.logger.Warningf("Encountered error while encoding headers: %v", err)
+			}
+		}
+	}
+	var (
+		err               error
+		endHeaders, first bool
+	)
+	first = true
+	for !endHeaders {
+		size := l.hBuf.Len()
+		if size > http2MaxFrameLen {
+			size = http2MaxFrameLen
+		} else {
+			endHeaders = true
+		}
+		if first {
+			first = false
+			err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+				StreamID:      streamID,
+				BlockFragment: l.hBuf.Next(size),
+				EndStream:     endStream,
+				EndHeaders:    endHeaders,
+			})
+		} else {
+			err = l.framer.fr.WriteContinuation(
+				streamID,
+				endHeaders,
+				l.hBuf.Next(size),
+			)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (l *loopyWriter) preprocessData(df *dataFrame) {
+	str, ok := l.estdStreams[df.streamID]
+	if !ok {
+		return
+	}
+	// If we got data for a stream it means that
+	// stream was originated and the headers were sent out.
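+	// Enqueue the frame; if the stream was idle, move it to the active list so
+	// that processData will pick it up.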
+ str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) { + o.resp <- l.sendQuota +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.draining && len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("finished processing active streams while in draining mode") + } + return nil +} + +func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error { + if l.side == clientSide { + return errors.New("earlyAbortStream not handled on client") + } + // In case the caller forgets to set the http status, default to 200. + if eas.httpStatus == 0 { + eas.httpStatus = 200 + } + headerFields := []hpack.HeaderField{ + {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))}, + {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)}, + {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))}, + {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())}, + } + + if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil { + return err + } + if eas.rst { + if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + // Flush and close the connection; we are done with it. + return errors.New("received GOAWAY with no active streams") + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. + if l.ssGoAwayHandler != nil { + draining, err := l.ssGoAwayHandler(g) + if err != nil { + return err + } + l.draining = draining + } + return nil +} + +func (l *loopyWriter) handle(i any) error { + switch i := i.(type) { + case *incomingWindowUpdate: + l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: + return l.incomingSettingsHandler(i) + case *outgoingSettings: + return l.outgoingSettingsHandler(i) + case *headerFrame: + return l.headerHandler(i) + case *registerStream: + l.registerStreamHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *earlyAbortStream: + return l.earlyAbortStreamHandler(i) + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: + l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: + l.outFlowControlSizeRequestHandler(i) + case closeConnection: + // Just return a non-I/O error and run() will flush and close the + // connection. 
+		return ErrConnClosing
+	default:
+		return fmt.Errorf("transport: unknown control message type %T", i)
+	}
+	return nil
+}
+
+func (l *loopyWriter) applySettings(ss []http2.Setting) {
+	for _, s := range ss {
+		switch s.ID {
+		case http2.SettingInitialWindowSize:
+			o := l.oiws
+			l.oiws = s.Val
+			if o < l.oiws {
+				// If the new limit is greater, make all depleted streams active.
+				for _, stream := range l.estdStreams {
+					if stream.state == waitingOnStreamQuota {
+						stream.state = active
+						l.activeStreams.enqueue(stream)
+					}
+				}
+			}
+		case http2.SettingHeaderTableSize:
+			updateHeaderTblSize(l.hEnc, s.Val)
+		}
+	}
+}
+
+// processData removes the first stream from active streams, writes out at most 16KB
+// of its data and then puts it at the end of activeStreams if there's still more data
+// to be sent and the stream has some stream-level flow control quota.
+func (l *loopyWriter) processData() (bool, error) {
+	if l.sendQuota == 0 {
+		return true, nil
+	}
+	str := l.activeStreams.dequeue() // Remove the first stream.
+	if str == nil {
+		return true, nil
+	}
+	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
+	// A data item is represented by a dataFrame, since it later translates into
+	// multiple HTTP2 data frames.
+	// Every dataFrame has two buffers: h, which holds the grpc-message header, and
+	// d, which holds the actual data.
+	// As an optimization to keep wire traffic low, data from d is copied into h to
+	// make the frame as big as possible, up to the maximum HTTP2 frame size.
+
+	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+		// Client sends out empty data frame with endStream = true
+		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+			return false, err
+		}
+		str.itl.dequeue() // remove the empty data item from stream
+		if str.itl.isEmpty() {
+			str.state = empty
+		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
+			if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
+				return false, err
+			}
+			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
+				return false, err
+			}
+		} else {
+			l.activeStreams.enqueue(str)
+		}
+		return false, nil
+	}
+	var (
+		buf []byte
+	)
+	// Figure out the maximum size we can send
+	maxSize := http2MaxFrameLen
+	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
+		str.state = waitingOnStreamQuota
+		return false, nil
+	} else if maxSize > strQuota {
+		maxSize = strQuota
+	}
+	if maxSize > int(l.sendQuota) { // connection-level flow control.
+		maxSize = int(l.sendQuota)
+	}
+	// Compute how much of the header and data we can send within quota and max frame length
+	hSize := min(maxSize, len(dataItem.h))
+	dSize := min(maxSize-hSize, len(dataItem.d))
+	if hSize != 0 {
+		if dSize == 0 {
+			buf = dataItem.h
+		} else {
+			// We can add some data to the grpc message header to distribute
+			// bytes more equally across frames.
+			// Copy on the stack to avoid generating garbage
+			var localBuf [http2MaxFrameLen]byte
+			copy(localBuf[:hSize], dataItem.h)
+			copy(localBuf[hSize:], dataItem.d[:dSize])
+			buf = localBuf[:hSize+dSize]
+		}
+	} else {
+		buf = dataItem.d
+	}
+
+	size := hSize + dSize
+
+	// Now that outgoing flow controls are checked we can replenish str's write quota
+	str.wq.replenish(size)
+	var endStream bool
+	// If this is the last data message on this stream and all of it can be written in this iteration.
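+	// (size covers everything left in h and d, so END_STREAM can go out with
+	// this very frame.)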
+ if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size { + endStream = true + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + return false, err + } + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + dataItem.h = dataItem.h[hSize:] + dataItem.d = dataItem.d[dSize:] + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. + l.activeStreams.enqueue(str) + } + return false, nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go new file mode 100644 index 0000000000..bc8ee07474 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "math" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. + defaultWriteQuota = 64 * 1024 + defaultClientMaxHeaderListSize = uint32(16 << 20) + defaultServerMaxHeaderListSize = uint32(16 << 20) +) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it. 
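+//
+// Client-initiated stream IDs are odd and increase by two per stream, so a
+// single transport can carry roughly 800 million RPCs before it is cycled.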
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
new file mode 100644
index 0000000000..97198c5158
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -0,0 +1,215 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// writeQuota is a soft limit on the amount of data a stream can
+// schedule before some of it is written out.
+type writeQuota struct {
+	quota int32
+	// get waits on reads from ch when quota drops to zero or below;
+	// replenish writes to it when quota becomes positive again.
+	ch chan struct{}
+	// done is triggered in the error case.
+	done <-chan struct{}
+	// replenish is called by loopyWriter to give quota back to the stream.
+	// It is implemented as a field so that it can be updated
+	// by tests.
+	replenish func(n int)
+}
+
+func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
+	w := &writeQuota{
+		quota: sz,
+		ch:    make(chan struct{}, 1),
+		done:  done,
+	}
+	w.replenish = w.realReplenish
+	return w
+}
+
+func (w *writeQuota) get(sz int32) error {
+	for {
+		if atomic.LoadInt32(&w.quota) > 0 {
+			atomic.AddInt32(&w.quota, -sz)
+			return nil
+		}
+		select {
+		case <-w.ch:
+			continue
+		case <-w.done:
+			return errStreamDone
+		}
+	}
+}
+
+func (w *writeQuota) realReplenish(n int) {
+	sz := int32(n)
+	a := atomic.AddInt32(&w.quota, sz)
+	b := a - sz
+	if b <= 0 && a > 0 {
+		select {
+		case w.ch <- struct{}{}:
+		default:
+		}
+	}
+}
+
+type trInFlow struct {
+	limit               uint32
+	unacked             uint32
+	effectiveWindowSize uint32
+}
+
+func (f *trInFlow) newLimit(n uint32) uint32 {
+	d := n - f.limit
+	f.limit = n
+	f.updateEffectiveWindowSize()
+	return d
+}
+
+func (f *trInFlow) onData(n uint32) uint32 {
+	f.unacked += n
+	if f.unacked >= f.limit/4 {
+		w := f.unacked
+		f.unacked = 0
+		f.updateEffectiveWindowSize()
+		return w
+	}
+	f.updateEffectiveWindowSize()
+	return 0
+}
+
+func (f *trInFlow) reset() uint32 {
+	w := f.unacked
+	f.unacked = 0
+	f.updateEffectiveWindowSize()
+	return w
+}
+
+func (f *trInFlow) updateEffectiveWindowSize() {
+	atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
+}
+
+func (f *trInFlow) getSize() uint32 {
+	return atomic.LoadUint32(&f.effectiveWindowSize)
+}
+
+// TODO(mmukhi): Simplify this code.
+// inFlow deals with inbound flow control.
+type inFlow struct {
+	mu sync.Mutex
+	// The inbound flow control limit for pending data.
+	limit uint32
+	// pendingData is the overall data which has been received but not yet
+	// consumed by the application.
+	pendingData uint32
+	// The amount of data the application has consumed, but for which grpc has
+	// not yet sent a window update. Used to reduce window update frequency.
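+	// A window update is sent once pendingUpdate crosses limit/4 (see onRead).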
+	pendingUpdate uint32
+	// delta is the extra window update given by the receiver when an application
+	// is reading data bigger in size than the inFlow limit.
+	delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) {
+	f.mu.Lock()
+	f.limit = n
+	f.mu.Unlock()
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+	if n > uint32(math.MaxInt32) {
+		n = uint32(math.MaxInt32)
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	// estSenderQuota is the receiver's view of the maximum number of bytes the sender
+	// can send without a window update.
+	estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+	// estUntransmittedData is the maximum number of bytes the sender might not have put
+	// on the wire yet. A value of 0 or less means that we have already received all or
+	// more bytes than the application is requesting to read.
+	estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+	// This implies that unless we send a window update, the sender won't be able to send all the bytes
+	// for this message. Therefore we must send an update over the limit since there's an active read
+	// request from the application.
+	if estUntransmittedData > estSenderQuota {
+		// The sender's window shouldn't go beyond 2^31 - 1, as specified in the HTTP/2 spec.
+		if f.limit+n > maxWindowSize {
+			f.delta = maxWindowSize - f.limit
+		} else {
+			// Send a window update for the whole message and not just the difference between
+			// estUntransmittedData and estSenderQuota. This will be helpful in case the message
+			// is padded; we will fall back on the currently available window (at least 1/4th of the limit).
+			f.delta = n
+		}
+		return f.delta
+	}
+	return 0
+}
+
+// onData is invoked when some data frame is received. It updates pendingData.
+func (f *inFlow) onData(n uint32) error {
+	f.mu.Lock()
+	f.pendingData += n
+	if f.pendingData+f.pendingUpdate > f.limit+f.delta {
+		limit := f.limit
+		rcvd := f.pendingData + f.pendingUpdate
+		f.mu.Unlock()
+		return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
+	}
+	f.mu.Unlock()
+	return nil
+}
+
+// onRead is invoked when the application reads the data. It returns the window size
+// to be sent to the peer.
+func (f *inFlow) onRead(n uint32) uint32 {
+	f.mu.Lock()
+	if f.pendingData == 0 {
+		f.mu.Unlock()
+		return 0
+	}
+	f.pendingData -= n
+	if n > f.delta {
+		n -= f.delta
+		f.delta = 0
+	} else {
+		f.delta -= n
+		n = 0
+	}
+	f.pendingUpdate += n
+	if f.pendingUpdate >= f.limit/4 {
+		wu := f.pendingUpdate
+		f.pendingUpdate = 0
+		f.mu.Unlock()
+		return wu
+	}
+	f.mu.Unlock()
+	return 0
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
new file mode 100644
index 0000000000..98f80e3fa0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -0,0 +1,480 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) + } + if r.Method != "POST" { + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? we did before + contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) + if !validContentType { + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) + } + if _, ok := w.(http.Flusher); !ok { + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + st.logger = prefixLoggerForServerHandlerTransport(st) + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. 
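+//
+// All writes to the ResponseWriter are serialized onto the ServeHTTP
+// goroutine via the writes channel (see do and runStream below), since
+// http.ResponseWriter is not safe for concurrent use.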
+type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats []stats.Handler + logger *grpclog.PrefixLogger +} + +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if ht.logger.V(logLevel) { + ht.logger.Infof("Closing: %v", err) + } + close(ht.closedCh) + }) +} + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. + return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + select { + case <-ht.closedCh: + return ErrConnClosing + case ht.writes <- fn: + return nil + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + // Note: The trailer fields are compressed with hpack after this call returns. 
+ // No WireLength field is set here. + for _, sh := range ht.stats { + sh.HandleRPC(s.Context(), &stats.OutTrailer{ + Trailer: s.trailer.Copy(), + }) + } + } + ht.Close(errors.New("finished writing status")) + return err +} + +// writePendingHeaders sets common and custom headers on the first +// write call (Write, WriteHeader, or WriteStatus) +func (ht *serverHandlerTransport) writePendingHeaders(s *Stream) { + ht.writeCommonHeaders(s) + ht.writeCustomHeaders(s) +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +// writeCustomHeaders sets custom headers set on the stream via SetHeader +// on the first write call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { + h := ht.rw.Header() + + s.hdrMu.Lock() + for k, vv := range s.header { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + h.Add(k, encodeMetadataHeader(k, v)) + } + } + + s.hdrMu.Unlock() +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + headersWritten := s.updateHeaderSent() + return ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + ht.rw.Write(hdr) + ht.rw.Write(data) + ht.rw.(http.Flusher).Flush() + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + if err := s.SetHeader(md); err != nil { + return err + } + + headersWritten := s.updateHeaderSent() + err := ht.do(func() { + if !headersWritten { + ht.writePendingHeaders(s) + } + + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + for _, sh := range ht.stats { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. + sh.HandleRPC(s.Context(), &stats.OutHeader{ + Header: md.Copy(), + Compression: s.sendCompress, + }) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := ht.req.Context() + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when the status has been written via WriteStatus. 
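+	// When it (or the transport/request context) is done, the goroutine below
+	// cancels the stream context and closes the transport.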
+ requestOver := make(chan struct{}) + go func() { + select { + case <-requestOver: + case <-ht.closedCh: + case <-ht.req.Context().Done(): + } + cancel() + ht.Close(errors.New("request is done processing")) + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + for _, sh := range ht.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + sh.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn := <-ht.writes: + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain(debugData string) { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. 
+// In particular, it can only be:
+//   - io.EOF
+//   - io.ErrUnexpectedEOF
+//   - of type transport.ConnectionError
+//   - an error from the status package
+func mapRecvMsgError(err error) error {
+	if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return err
+	}
+	if se, ok := err.(http2.StreamError); ok {
+		if code, ok := http2ErrConvTab[se.Code]; ok {
+			return status.Error(code, se.Error())
+		}
+	}
+	if strings.Contains(err.Error(), "body closed by handler") {
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return connectionErrorf(true, err, err.Error())
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
new file mode 100644
index 0000000000..badab8acf3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -0,0 +1,1797 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"net/http"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/http2"
+	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
+	icredentials "google.golang.org/grpc/internal/credentials"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/grpcutil"
+	imetadata "google.golang.org/grpc/internal/metadata"
+	istatus "google.golang.org/grpc/internal/status"
+	"google.golang.org/grpc/internal/syscall"
+	"google.golang.org/grpc/internal/transport/networktype"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
+// http2Client implements the ClientTransport interface with HTTP2.
+type http2Client struct {
+	lastRead  int64 // Keep this field 64-bit aligned. Accessed atomically.
+	ctx       context.Context
+	cancel    context.CancelFunc
+	ctxDone   <-chan struct{} // Cache the ctx.Done() chan.
+	userAgent string
+	// address contains the resolver returned address for this transport.
+	// If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
+	// passed to `NewStream`, when determining the :authority header.
+	address    resolver.Address
+	md         metadata.MD
+	conn       net.Conn // underlying communication channel
+	loopy      *loopyWriter
+	remoteAddr net.Addr
+	localAddr  net.Addr
+	authInfo   credentials.AuthInfo // auth info about the connection
+
+	readerDone chan struct{} // sync point to enable testing.
+	writerDone chan struct{} // sync point to enable testing.
+	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
+	// that the server sent GoAway on this transport.
+	goAway chan struct{}
+
+	framer *framer
+	// controlBuf delivers all the control related tasks (e.g., window
+	// updates, reset streams, and various settings) to the controller.
+	// Do not access controlBuf with mu held.
+	controlBuf *controlBuffer
+	fc         *trInFlow
+	// The scheme used: https if TLS is on, http otherwise.
+	scheme string
+
+	isSecure bool
+
+	perRPCCreds []credentials.PerRPCCredentials
+
+	kp               keepalive.ClientParameters
+	keepaliveEnabled bool
+
+	statsHandlers []stats.Handler
+
+	initialWindowSize int32
+
+	// configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
+	maxSendHeaderListSize *uint32
+
+	bdpEst *bdpEstimator
+
+	maxConcurrentStreams  uint32
+	streamQuota           int64
+	streamsQuotaAvailable chan struct{}
+	waitingStreams        uint32
+	nextID                uint32
+	registeredCompressors string
+
+	// Do not access controlBuf with mu held.
+	mu            sync.Mutex // guard the following variables
+	state         transportState
+	activeStreams map[uint32]*Stream
+	// prevGoAwayID records the Last-Stream-ID in the previous GOAway frame.
+	prevGoAwayID uint32
+	// goAwayReason records the http2.ErrCode and debug data received with the
+	// GoAway frame.
+	goAwayReason GoAwayReason
+	// goAwayDebugMessage contains a detailed human readable string about a
+	// GoAway frame, useful for error messages.
+	goAwayDebugMessage string
+	// A condition variable used to signal when the keepalive goroutine should
+	// go dormant. The condition for dormancy is based on the number of active
+	// streams and the `PermitWithoutStream` keepalive client parameter. And
+	// since the number of active streams is guarded by the above mutex, we use
+	// the same for this condition variable as well.
+	kpDormancyCond *sync.Cond
+	// A boolean to track whether the keepalive goroutine is dormant or not.
+	// This is checked before attempting to signal the above condition
+	// variable.
+	kpDormant bool
+
+	// Fields below are for channelz metric collection.
+	channelzID *channelz.Identifier
+	czData     *channelzData
+
+	onClose func(GoAwayReason)
+
+	bufferPool *bufferPool
+
+	connectionID uint64
+	logger       *grpclog.PrefixLogger
+}
+
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) {
+	address := addr.Addr
+	networkType, ok := networktype.Get(addr)
+	if fn != nil {
+		// Special handling for unix scheme with custom dialer. Back in the day,
+		// we did not have a unix resolver and therefore targets with a unix
+		// scheme would end up using the passthrough resolver. So, users used a
+		// custom dialer in this case and expected the original dial target to
+		// be passed to the custom dialer. Now, we have a unix resolver. But if
+		// a custom dialer is specified, we want to retain the old behavior in
+		// terms of the address being passed to the custom dialer.
+		if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
+			// Supported unix targets are either "unix://absolute-path" or
+			// "unix:relative-path".
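+			// Rebuild the scheme prefix the custom dialer originally saw:
+			// absolute paths came from "unix://", relative ones from "unix:".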
+ if filepath.IsAbs(address) { + return fn(ctx, "unix://"+address) + } + return fn(ctx, "unix:"+address) + } + return fn(ctx, address) + } + if !ok { + networkType, address = parseDialTarget(address) + } + if networkType == "tcp" && useProxy { + return proxyDial(ctx, address, grpcUA) + } + return (&net.Dialer{}).DialContext(ctx, networkType, address) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + // gRPC, resolver, balancer etc. can specify arbitrary data in the + // Attributes field of resolver.Address, which is shoved into connectCtx + // and passed to the dialer and credential handshaker. This makes it possible for + // address specific arbitrary data to reach custom dialers and credential handshakers. + connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) + + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) + } + + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("Aborting due to connect deadline expiring: %v", err) + } + conn.Close() + } + }(conn) + + kp := opts.KeepaliveParams + // Validate keepalive parameters. 
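+	// Zero values fall back to the defaults in defaults.go; keepalive is only
+	// armed below when Time is finite.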
+ if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + keepaliveEnabled := false + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + keepaliveEnabled = true + } + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + transportCreds := opts.TransportCredentials + perRPCCreds := opts.PerRPCCredentials + + if b := opts.CredsBundle; b != nil { + if t := b.TransportCredentials(); t != nil { + transportCreds = t + } + if t := b.PerRPCCredentials(); t != nil { + perRPCCreds = append(perRPCCreds, t) + } + } + if transportCreds != nil { + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + for _, cd := range perRPCCreds { + if cd.RequireTransportSecurity() { + if ci, ok := authInfo.(interface { + GetCommonAuthInfo() credentials.CommonAuthInfo + }); ok { + secLevel := ci.GetCommonAuthInfo().SecurityLevel + if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity { + return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection") + } + } + } + } + isSecure = true + if transportCreds.Info().SecurityProtocol == "tls" { + scheme = "https" + } + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := opts.WriteBufferSize + readBufSize := opts.ReadBufferSize + maxHeaderListSize := defaultClientMaxHeaderListSize + if opts.MaxHeaderListSize != nil { + maxHeaderListSize = *opts.MaxHeaderListSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. + cancel: cancel, + userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + perRPCCreds: perRPCCreds, + kp: kp, + statsHandlers: opts.StatsHandlers, + initialWindowSize: initialWindowSize, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + czData: new(channelzData), + keepaliveEnabled: keepaliveEnabled, + bufferPool: newBufferPool(), + onClose: onClose, + } + t.logger = prefixLoggerForClientTransport(t) + // Add peer information to the http2client context. 
+ t.ctx = peer.NewContext(t.ctx, t.getPeer()) + + if md, ok := addr.Metadata.(*metadata.MD); ok { + t.md = *md + } else if md := imetadata.Get(addr); md != nil { + t.md = md + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + for _, sh := range t.statsHandlers { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + sh.HandleConn(t.ctx, connBegin) + } + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err + } + if t.keepaliveEnabled { + t.kpDormancyCond = sync.NewCond(&t.mu) + go t.keepalive() + } + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() + + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + return nil, err + } + if n != len(clientPreface) { + err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + return nil, err + } + var ss []http2.Setting + + if t.initialWindowSize != defaultWindowSize { + ss = append(ss, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } + if opts.MaxHeaderListSize != nil { + ss = append(ss, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *opts.MaxHeaderListSize, + }) + } + err = t.framer.fr.WriteSettings(ss...) + if err != nil { + err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + return nil, err + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) + return nil, err + } + } + + t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1) + + if err := t.framer.writer.Flush(); err != nil { + return nil, err + } + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy.run() + close(t.writerDone) + }() + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
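+	// MaxStreamID (defaults.go) bounds the stream ID before the transport is
+	// gracefully closed, so wrap-around is not expected in practice.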
+ s := &Stream{ + ct: t, + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + doneFunc: callHdr.DoneFunc, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + closeStream: func(err error) { + t.CloseStream(s, err) + }, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + ri := credentials.RequestInfo{ + Method: callHdr.Method, + AuthInfo: t.authInfo, + } + ctxWithRequestInfo := icredentials.NewRequestInfoContext(ctx, ri) + authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctxWithRequestInfo, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. + hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + if callHdr.PreviousAttempts > 0 { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) + } + + registeredCompressors := t.registeredCompressors + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. 
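+		// The name is still advertised via grpc-accept-encoding below so the
+		// server knows the client can accept it.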
+ if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
+ if registeredCompressors != "" {
+ registeredCompressors += ","
+ }
+ registeredCompressors += callHdr.SendCompress
+ }
+ }
+
+ if registeredCompressors != "" {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
+ }
+ if dl, ok := ctx.Deadline(); ok {
+ // Send out timeout regardless of its value. The server can detect timeout context by itself.
+ // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
+ timeout := time.Until(dl)
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
+ }
+ for k, v := range authData {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ for k, v := range callAuthData {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ if b := stats.OutgoingTags(ctx); b != nil {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
+ }
+ if b := stats.OutgoingTrace(ctx); b != nil {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
+ }
+
+ if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+ var k string
+ for k, vv := range md {
+ // HTTP doesn't allow you to set pseudoheaders after non-pseudoheaders were set.
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ for _, vv := range added {
+ for i, v := range vv {
+ if i%2 == 0 {
+ k = strings.ToLower(v)
+ continue
+ }
+ // HTTP doesn't allow you to set pseudoheaders after non-pseudoheaders were set.
+ if isReservedHeader(k) {
+ continue
+ }
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ }
+ for k, vv := range t.md {
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ return headerFields, nil
+}
+
+func (t *http2Client) createAudience(callHdr *CallHdr) string {
+ // Create an audience string only if needed.
+ if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
+ return ""
+ }
+ // Construct URI required to get auth request metadata.
+ // Omit port if it is the default one.
+ host := strings.TrimSuffix(callHdr.Host, ":443")
+ pos := strings.LastIndex(callHdr.Method, "/")
+ if pos == -1 {
+ pos = len(callHdr.Method)
+ }
+ return "https://" + host + callHdr.Method[:pos]
+}
+
+func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
+ if len(t.perRPCCreds) == 0 {
+ return nil, nil
+ }
+ authData := map[string]string{}
+ for _, c := range t.perRPCCreds {
+ data, err := c.GetRequestMetadata(ctx, audience)
+ if err != nil {
+ if st, ok := status.FromError(err); ok {
+ // Restrict the code to the list allowed by gRFC A54.
+ if istatus.IsRestrictedControlPlaneCode(st) {
+ err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
+ }
+ return nil, err
+ }
+
+ return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
+ }
+ for k, v := range data {
+ // Capital header names are illegal in HTTP/2. 
+ k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + var callAuthData map[string]string + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. + if callCreds := callHdr.Creds; callCreds != nil { + if callCreds.RequireTransportSecurity() { + ri, _ := credentials.RequestInfoFromContext(ctx) + if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil { + return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) + } + callAuthData = make(map[string]string, len(data)) + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStreamError wraps an error and reports additional information. Typically +// NewStream errors result in transparent retry, as they mean nothing went onto +// the wire. However, there are two notable exceptions: +// +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. +type NewStreamError struct { + Err error + + AllowTransparentRetry bool +} + +func (e NewStreamError) Error() string { + return e.Err.Error() +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. All non-nil errors returned will be *NewStreamError. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { + ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. 
+ atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + close(s.headerChan) + } + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) error { + t.mu.Lock() + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { + t.mu.Unlock() + cleanup(ErrConnClosing) + return ErrConnClosing + } + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + // If the keepalive goroutine has gone dormant, wake it up. + if t.kpDormant { + t.kpDormancyCond.Signal() + } + t.mu.Unlock() + return nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + transportDrainRequired := false + checkForStreamQuota := func(it any) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. + if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + t.mu.Lock() + if t.state == draining || t.activeStreams == nil { // Can be niled from Close(). + t.mu.Unlock() + return false // Don't create a stream if the transport is already closed. + } + t.activeStreams[s.id] = s + t.mu.Unlock() + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + var hdrListSizeErr error + checkForHeaderListSize := func(it any) bool { + if t.maxSendHeaderListSize == nil { + return true + } + hdrFrame := it.(*headerFrame) + var sz int64 + for _, f := range hdrFrame.hf { + if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) { + hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize) + return false + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(func(it any) bool { + return checkForHeaderListSize(it) && checkForStreamQuota(it) + }, hdr) + if err != nil { + // Connection closed. + return nil, &NewStreamError{Err: err, AllowTransparentRetry: true} + } + if success { + break + } + if hdrListSizeErr != nil { + return nil, &NewStreamError{Err: hdrListSizeErr} + } + firstTry = false + select { + case <-ch: + case <-ctx.Done(): + return nil, &NewStreamError{Err: ContextErr(ctx.Err())} + case <-t.goAway: + return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true} + case <-t.ctx.Done(): + return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true} + } + } + if len(t.statsHandlers) != 0 { + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } + for _, sh := range t.statsHandlers { + // Note: The header fields are compressed with hpack after this call returns. + // No WireLength field is set here. 
+ // Note: Creating a new stats object to prevent pollution. + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + Header: header, + } + sh.HandleRPC(s.ctx, outHeader) + } + } + if transportDrainRequired { + if t.logger.V(logLevel) { + t.logger.Infof("Draining transport: t.nextID > MaxStreamID") + } + t.GracefulClose() + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. If multiple closeStream calls + // happen simultaneously, wait for the first to finish. + <-s.done + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // If headerChan isn't closed, then close it. + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.noHeaders = true + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(any) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) + // This will unblock write. + close(s.done) + if s.doneFunc != nil { + s.doneFunc() + } +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +func (t *http2Client) Close(err error) { + t.mu.Lock() + // Make sure we only close once. + if t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. + if t.state != draining { + t.onClose(GoAwayInvalid) + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + if t.kpDormant { + // If the keepalive goroutine is blocked on this condition variable, we + // should unblock it so that the goroutine eventually exits. 
+ t.kpDormancyCond.Signal() + } + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + t.conn.Close() + channelz.RemoveEntry(t.channelzID) + // Append info about previous goaways if there were any, since this may be important + // for understanding the root cause for this connection to be closed. + _, goAwayDebugMessage := t.GetGoAwayReason() + + var st *status.Status + if len(goAwayDebugMessage) > 0 { + st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage) + err = st.Err() + } else { + st = status.New(codes.Unavailable, err.Error()) + } + + // Notify all active streams. + for _, s := range streams { + t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false) + } + for _, sh := range t.statsHandlers { + connEnd := &stats.ConnEnd{ + Client: true, + } + sh.HandleConn(t.ctx, connEnd) + } +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("GracefulClose called") + } + t.onClose(GoAwayInvalid) + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) + return + } + t.controlBuf.put(&incomingGoAway{}) +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + h: hdr, + d: data, + } + if hdr != nil || data != nil { // If it's not an empty data frame, check quota. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) *Stream { + t.mu.Lock() + s := t.activeStreams[f.Header().StreamID] + t.mu.Unlock() + return s +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. 
+func (t *http2Client) updateFlowControl(n uint32) { + updateIWS := func(any) bool { + t.initialWindowSize = int32(n) + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s := t.getStream(f) + if s == nil { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.StreamEnded() { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s := t.getStream(f) + if s == nil { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + if t.logger.V(logLevel) { + t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode) + } + statusCode = codes.Unknown + } + if statusCode == codes.Canceled { + if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) { + // Our deadline was already exceeded, and that was likely the cause + // of this cancelation. Alter the status code accordingly. 
+ statusCode = codes.DeadlineExceeded
+ }
+ }
+ t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
+}
+
+func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
+ if f.IsAck() {
+ return
+ }
+ var maxStreams *uint32
+ var ss []http2.Setting
+ var updateFuncs []func()
+ f.ForeachSetting(func(s http2.Setting) error {
+ switch s.ID {
+ case http2.SettingMaxConcurrentStreams:
+ maxStreams = new(uint32)
+ *maxStreams = s.Val
+ case http2.SettingMaxHeaderListSize:
+ updateFuncs = append(updateFuncs, func() {
+ t.maxSendHeaderListSize = new(uint32)
+ *t.maxSendHeaderListSize = s.Val
+ })
+ default:
+ ss = append(ss, s)
+ }
+ return nil
+ })
+ if isFirst && maxStreams == nil {
+ maxStreams = new(uint32)
+ *maxStreams = math.MaxUint32
+ }
+ sf := &incomingSettings{
+ ss: ss,
+ }
+ if maxStreams != nil {
+ updateStreamQuota := func() {
+ delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
+ t.maxConcurrentStreams = *maxStreams
+ t.streamQuota += delta
+ if delta > 0 && t.waitingStreams > 0 {
+ close(t.streamsQuotaAvailable) // wake all of them up.
+ t.streamsQuotaAvailable = make(chan struct{}, 1)
+ }
+ }
+ updateFuncs = append(updateFuncs, updateStreamQuota)
+ }
+ t.controlBuf.executeAndPut(func(any) bool {
+ for _, f := range updateFuncs {
+ f()
+ }
+ return true
+ }, sf)
+}
+
+func (t *http2Client) handlePing(f *http2.PingFrame) {
+ if f.IsAck() {
+ // Maybe it's a BDP ping.
+ if t.bdpEst != nil {
+ t.bdpEst.calculate(f.Data)
+ }
+ return
+ }
+ pingAck := &ping{ack: true}
+ copy(pingAck.data[:], f.Data[:])
+ t.controlBuf.put(pingAck)
+}
+
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ return
+ }
+ if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
+ // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
+ // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
+ // enabled by default and double the configured KEEPALIVE_TIME used for new connections
+ // on that channel.
+ logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
+ }
+ id := f.LastStreamID
+ if id > 0 && id%2 == 0 {
+ t.mu.Unlock()
+ t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id))
+ return
+ }
+ // A client can receive multiple GoAways from the server (see
+ // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
+ // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be
+ // sent after an RTT delay with the ID of the last stream the server will
+ // process.
+ //
+ // Therefore, when we get the first GoAway we don't necessarily close any
+ // streams. While in case of second GoAway we close all streams created after
+ // the GoAwayId. This way streams that were in-flight while the GoAway from
+ // server was being sent don't get killed.
+ select {
+ case <-t.goAway: // t.goAway has been closed (i.e., multiple GoAways).
+ // If there are multiple GoAways the first one should always have an ID greater than the following ones. 
+ if id > t.prevGoAwayID {
+ t.mu.Unlock()
+ t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
+ return
+ }
+ default:
+ t.setGoAwayReason(f)
+ close(t.goAway)
+ defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held.
+ // Notify the clientconn about the GOAWAY before we set the state to
+ // draining, to allow the client to stop attempting to create streams
+ // before disallowing new streams on this connection.
+ if t.state != draining {
+ t.onClose(t.goAwayReason)
+ t.state = draining
+ }
+ }
+ // All streams with IDs greater than the GoAwayId
+ // and smaller than the previous GoAway ID should be killed.
+ upperLimit := t.prevGoAwayID
+ if upperLimit == 0 { // This is the first GoAway Frame.
+ upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
+ }
+
+ t.prevGoAwayID = id
+ if len(t.activeStreams) == 0 {
+ t.mu.Unlock()
+ t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
+ return
+ }
+
+ streamsToClose := make([]*Stream, 0)
+ for streamID, stream := range t.activeStreams {
+ if streamID > id && streamID <= upperLimit {
+ // The stream was unprocessed by the server.
+ atomic.StoreUint32(&stream.unprocessed, 1)
+ streamsToClose = append(streamsToClose, stream)
+ }
+ }
+ t.mu.Unlock()
+ // Called outside t.mu because closeStream can take controlBuf's mu, which
+ // could induce deadlock and is not allowed.
+ for _, stream := range streamsToClose {
+ t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+ }
+}
+
+// setGoAwayReason sets the value of t.goAwayReason based
+// on the GoAway frame received.
+// It expects a lock on transport's mutex to be held by
+// the caller.
+func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
+ t.goAwayReason = GoAwayNoReason
+ switch f.ErrCode {
+ case http2.ErrCodeEnhanceYourCalm:
+ if string(f.DebugData()) == "too_many_pings" {
+ t.goAwayReason = GoAwayTooManyPings
+ }
+ }
+ if len(f.DebugData()) == 0 {
+ t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
+ } else {
+ t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
+ }
+}
+
+func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ return t.goAwayReason, t.goAwayDebugMessage
+}
+
+func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ t.controlBuf.put(&incomingWindowUpdate{
+ streamID: f.Header().StreamID,
+ increment: f.Increment,
+ })
+}
+
+// operateHeaders takes action on the decoded headers.
+func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
+ s := t.getStream(frame)
+ if s == nil {
+ return
+ }
+ endStream := frame.StreamEnded()
+ atomic.StoreUint32(&s.bytesReceived, 1)
+ initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
+
+ if !initialHeader && !endStream {
+ // As specified by gRPC over HTTP2, a HEADERS frame (and associated CONTINUATION frames) can only appear at the start or end of a stream. Therefore, second HEADERS frame must have EOS bit set.
+ st := status.New(codes.Internal, "a HEADERS frame cannot appear in the middle of a stream")
+ t.closeStream(s, st.Err(), true, http2.ErrCodeProtocol, st, nil, false)
+ return
+ }
+
+ // frame.Truncated is set to true when framer detects that the current header
+ // list size hits MaxHeaderListSize limit. 
+ if frame.Truncated { + se := status.New(codes.Internal, "peer header list size exceeded limit") + t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream) + return + } + + var ( + // If a gRPC Response-Headers has already been received, then it means + // that the peer is speaking gRPC and we are in gRPC mode. + isGRPC = !initialHeader + mdata = make(map[string][]string) + contentTypeErr = "malformed header: missing HTTP content-type" + grpcMessage string + statusGen *status.Status + recvCompress string + httpStatusCode *int + httpStatusErr string + rawStatusCode = codes.Unknown + // headerError is set if an error is encountered while parsing the headers + headerError string + ) + + if initialHeader { + httpStatusErr = "malformed header: missing HTTP status" + } + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType { + contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value) + break + } + contentTypeErr = "" + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + isGRPC = true + case "grpc-encoding": + recvCompress = hf.Value + case "grpc-status": + code, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + rawStatusCode = codes.Code(uint32(code)) + case "grpc-message": + grpcMessage = decodeGrpcMessage(hf.Value) + case "grpc-status-details-bin": + var err error + statusGen, err = decodeGRPCStatusDetails(hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) + } + case ":status": + if hf.Value == "200" { + httpStatusErr = "" + statusCode := 200 + httpStatusCode = &statusCode + break + } + + c, err := strconv.ParseInt(hf.Value, 10, 32) + if err != nil { + se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err)) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + statusCode := int(c) + httpStatusCode = &statusCode + + httpStatusErr = fmt.Sprintf( + "unexpected HTTP status code received from server: %d (%s)", + statusCode, + http.StatusText(statusCode), + ) + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err) + logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + if !isGRPC || httpStatusErr != "" { + var code = codes.Internal // when header does not include HTTP status, return INTERNAL + + if httpStatusCode != nil { + var ok bool + code, ok = HTTPStatusConvTab[*httpStatusCode] + if !ok { + code = codes.Unknown + } + } + var errs []string + if httpStatusErr != "" { + errs = append(errs, httpStatusErr) + } + if contentTypeErr != "" { + errs = append(errs, contentTypeErr) + } + // Verify the HTTP response is a 200. 
+ se := status.New(code, strings.Join(errs, "; ")) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + if headerError != "" { + se := status.New(codes.Internal, headerError) + t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream) + return + } + + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = recvCompress + if len(mdata) > 0 { + s.header = mdata + } + close(s.headerChan) + } + } + + for _, sh := range t.statsHandlers { + if !endStream { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + Header: metadata.MD(mdata).Copy(), + Compression: s.recvCompress, + } + sh.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + Trailer: metadata.MD(mdata).Copy(), + } + sh.HandleRPC(s.ctx, inTrailer) + } + } + + if !endStream { + return + } + + if statusGen == nil { + statusGen = status.New(rawStatusCode, grpcMessage) + } + + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. + rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true) +} + +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { + frame, err := t.framer.fr.ReadFrame() + if err != nil { + return connectionErrorf(true, err, "error reading server preface: %v", err) + } + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) + } + t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + + // loop to keep reading incoming messages on this transport. + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. 
+ if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + code := http2ErrConvTab[se.Code] + errorDetail := t.framer.fr.ErrorDetail() + var msg string + if errorDetail != nil { + msg = errorDetail.Error() + } else { + msg = "received invalid frame" + } + t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false) + } + continue + } else { + // Transport error. + t.Close(connectionErrorf(true, err, "error reading from server: %v", err)) + return + } + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame, false) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + if logger.V(logLevel) { + logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } + } +} + +func minTime(a, b time.Duration) time.Duration { + if a < b { + return a + } + return b +} + +// keepalive running in a separate goroutine makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + // True iff a ping has been sent, and no data has been received since then. + outstandingPing := false + // Amount of time remaining before which we should receive an ACK for the + // last sent ping. + timeoutLeft := time.Duration(0) + // Records the last value of t.lastRead before we go block on the timer. + // This is required to check for read activity since then. + prevNano := time.Now().UnixNano() + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were here. + outstandingPing = false + // Next timer should fire at kp.Time seconds from lastRead time. + timer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && timeoutLeft <= 0 { + t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")) + return + } + t.mu.Lock() + if t.state == closing { + // If the transport is closing, we should exit from the + // keepalive goroutine here. If not, we could have a race + // between the call to Signal() from Close() and the call to + // Wait() here, whereby the keepalive goroutine ends up + // blocking on the condition variable which will never be + // signalled again. + t.mu.Unlock() + return + } + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // If a ping was sent out previously (because there were active + // streams at that point) which wasn't acked and its timeout + // hadn't fired, but we got here and are about to go dormant, + // we should make sure that we unconditionally send a ping once + // we awaken. + outstandingPing = false + t.kpDormant = true + t.kpDormancyCond.Wait() + } + t.kpDormant = false + t.mu.Unlock() + + // We get here either because we were dormant and a new stream was + // created which unblocked the Wait() call, or because the + // keepalive timer expired. In both cases, we need to send a ping. 
+ if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + timeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, timeoutLeft) + timeoutLeft -= sleepDuration + timer.Reset(sleepDuration) + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } + +func (t *http2Client) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Client) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go new file mode 100644 index 0000000000..c06db679d8 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -0,0 +1,1464 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/syscall" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +var ( + // ErrIllegalHeaderWrite indicates that setting header is illegal because of + // the stream's state. + ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times") + // ErrHeaderListSizeLimitViolation indicates that the header list size is larger + // than the limit set by peer. + ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer") +) + +// serverConnectionCounter counts the number of connections a server has seen +// (equal to the number of http2Servers created). Must be accessed atomically. +var serverConnectionCounter uint64 + +// http2Server implements the ServerTransport interface with HTTP2. +type http2Server struct { + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats []stats.Handler + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + maxSendHeaderListSize *uint32 + + mu sync.Mutex // guard the following + + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. 
+ drainEvent *grpcsync.Event + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. + idle time.Time + + // Fields below are for channelz metric collection. + channelzID *channelz.Identifier + czData *channelzData + bufferPool *bufferPool + + connectionID uint64 + + // maxStreamMu guards the maximum stream ID + // This lock may not be taken if mu is already held. + maxStreamMu sync.Mutex + maxStreamID uint32 // max stream ID ever seen + + logger *grpclog.PrefixLogger +} + +// NewServerTransport creates a http2 transport with conn and configuration +// options from config. +// +// It returns a non-nil transport and a nil error on success. On failure, it +// returns a nil transport and a non-nil error. For a special case where the +// underlying conn gets closed before the client preface could be read, it +// returns a nil transport and a nil error. +func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + var authInfo credentials.AuthInfo + rawConn := conn + if config.Credentials != nil { + var err error + conn, authInfo, err = config.Credentials.ServerHandshake(rawConn) + if err != nil { + // ErrConnDispatched means that the connection was dispatched away + // from gRPC; those connections should be left open. io.EOF means + // the connection was closed before handshaking completed, which can + // happen naturally from probers. Return these errors directly. + if err == credentials.ErrConnDispatched || err == io.EOF { + return nil, err + } + return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + } + } + writeBufSize := config.WriteBufferSize + readBufSize := config.ReadBufferSize + maxHeaderListSize := defaultServerMaxHeaderListSize + if config.MaxHeaderListSize != nil { + maxHeaderListSize = *config.MaxHeaderListSize + } + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) + // Send initial settings as connection preface to client. + isettings := []http2.Setting{{ + ID: http2.SettingMaxFrameSize, + Val: http2MaxFrameLen, + }} + if config.MaxStreams != math.MaxUint32 { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: config.MaxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if config.MaxHeaderListSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxHeaderListSize, + Val: *config.MaxHeaderListSize, + }) + } + if config.HeaderTableSize != nil { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingHeaderTableSize, + Val: *config.HeaderTableSize, + }) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. 
+ if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. + kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + + done := make(chan struct{}) + t := &http2Server{ + ctx: setConnection(context.Background(), rawConn), + done: done, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: config.MaxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandlers, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + czData: new(channelzData), + bufferPool: newBufferPool(), + } + t.logger = prefixLoggerForServerTransport(t) + // Add peer information to the http2server context. + t.ctx = peer.NewContext(t.ctx, t.getPeer()) + + t.controlBuf = newControlBuffer(t.done) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + for _, sh := range t.stats { + t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + sh.HandleConn(t.ctx, connBegin) + } + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err + } + + t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close(err) + } + }() + + // Check the validity of client preface. + preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + // In deployments where a gRPC server runs behind a cloud load balancer + // which performs regular TCP level health checks, the connection is + // closed immediately by the latter. Returning io.EOF here allows the + // grpc server implementation to recognize this scenario and suppress + // logging to reduce spam. 
+ if err == io.EOF { + return nil, io.EOF + } + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + t.loopy.run() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { + // Acquire max stream ID lock for entire duration + t.maxStreamMu.Lock() + defer t.maxStreamMu.Unlock() + + streamID := frame.Header().StreamID + + // frame.Truncated is set to true when framer detects that the current header + // list size hits MaxHeaderListSize limit. + if frame.Truncated { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeFrameSize, + onWrite: func() {}, + }) + return nil + } + + if streamID%2 != 1 || streamID <= t.maxStreamID { + // illegal gRPC stream id. + return fmt.Errorf("received an illegal stream id: %v. 
headers frame: %+v", streamID, frame) + } + t.maxStreamID = streamID + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + } + var ( + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(metadata.MD, len(frame.Fields)) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status + + timeoutSet bool + timeout time.Duration + ) + + for _, hf := range frame.Fields { + switch hf.Name { + case "content-type": + contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) + if !validContentType { + contentType = hf.Value + break + } + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + s.contentSubtype = contentSubtype + isGRPC = true + + case "grpc-accept-encoding": + mdata[hf.Name] = append(mdata[hf.Name], hf.Value) + if hf.Value == "" { + continue + } + compressors := hf.Value + if s.clientAdvertisedCompressors != "" { + compressors = s.clientAdvertisedCompressors + "," + compressors + } + s.clientAdvertisedCompressors = compressors + case "grpc-encoding": + s.recvCompress = hf.Value + case ":method": + httpMethod = hf.Value + case ":path": + s.method = hf.Value + case "grpc-timeout": + timeoutSet = true + var err error + if timeout, err = decodeTimeout(hf.Value); err != nil { + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) + } + // "Transports must consider requests containing the Connection header + // as malformed." - A41 + case "connection": + if t.logger.V(logLevel) { + t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec") + } + protocolError = true + default: + if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { + break + } + v, err := decodeMetadataHeader(hf.Name, hf.Value) + if err != nil { + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) + t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) + break + } + mdata[hf.Name] = append(mdata[hf.Name], v) + } + } + + // "If multiple Host headers or multiple :authority headers are present, the + // request must be rejected with an HTTP status code 400 as required by Host + // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM + // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2 + // error, this takes precedence over a client not speaking gRPC. 
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 { + errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"])) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + return nil + } + + if protocolError { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeProtocol, + onWrite: func() {}, + }) + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil + } + + // "If :authority is missing, Host must be renamed to :authority." - A41 + if len(mdata[":authority"]) == 0 { + // No-op if host isn't present, no eventual :authority header is a valid + // RPC. + if host, ok := mdata["host"]; ok { + mdata[":authority"] = host + delete(mdata, "host") + } + } else { + // "If :authority is present, Host must be discarded" - A41 + delete(mdata, "host") + } + + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + + // Attach the received metadata to the context. 
+ if len(mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, mdata) + if statsTags := mdata["grpc-tags-bin"]; len(statsTags) > 0 { + s.ctx = stats.SetIncomingTags(s.ctx, []byte(statsTags[len(statsTags)-1])) + } + if statsTrace := mdata["grpc-trace-bin"]; len(statsTrace) > 0 { + s.ctx = stats.SetIncomingTrace(s.ctx, []byte(statsTrace[len(statsTrace)-1])) + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + s.cancel() + return nil + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + s.cancel() + return nil + } + if httpMethod != http.MethodPost { + t.mu.Unlock() + errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod) + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early: %v", errMsg) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 405, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.New(codes.Internal, errMsg), + rst: !frame.StreamEnded(), + }) + s.cancel() + return nil + } + if t.inTapHandle != nil { + var err error + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + t.mu.Unlock() + if t.logger.V(logLevel) { + t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) + } + stat, ok := status.FromError(err) + if !ok { + stat = status.New(codes.PermissionDenied, err.Error()) + } + t.controlBuf.put(&earlyAbortStream{ + httpStatus: 200, + streamID: s.id, + contentSubtype: s.contentSubtype, + status: stat, + rst: !frame.StreamEnded(), + }) + return nil + } + } + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + atomic.AddInt64(&t.czData.streamsStarted, 1) + atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + for _, sh := range t.stats { + s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + Header: mdata.Copy(), + } + sh.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + freeBuffer: t.bufferPool.put, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + // Register the stream with loopy. + t.controlBuf.put(®isterStream{ + streamID: s.id, + wq: s.wq, + }) + handle(s) + return nil +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. 
+func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) + for { + t.controlBuf.throttle() + frame, err := t.framer.fr.ReadFrame() + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + if t.logger.V(logLevel) { + t.logger.Warningf("Encountered http2.StreamError: %v", se) + } + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close(err) + return + } + t.Close(err) + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + if t.logger.V(logLevel) { + t.logger.Infof("Received unsupported frame type %T", frame) + } + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. +// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. 
+ // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if s.getState() == streamReadDone { + t.closeStream(s, true, http2.ErrCodeStreamClosed, false) + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + buffer := t.bufferPool.get() + buffer.Reset() + buffer.Write(f.Data()) + s.write(recvMsg{buffer: buffer}) + } + } + if f.StreamEnded() { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + // If the stream is not deleted from the transport's active streams map, then do a regular close stream. + if s, ok := t.getStream(f); ok { + t.closeStream(s, false, 0, false) + return + } + // If the stream is already deleted from the active streams map, then put a cleanupStream item into controlbuf to delete the stream from loopy writer's established streams map. + t.controlBuf.put(&cleanupStream{ + streamID: f.Header().StreamID, + rst: false, + rstCode: 0, + onWrite: func() {}, + }) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + var updateFuncs []func() + f.ForeachSetting(func(s http2.Setting) error { + switch s.ID { + case http2.SettingMaxHeaderListSize: + updateFuncs = append(updateFuncs, func() { + t.maxSendHeaderListSize = new(uint32) + *t.maxSendHeaderListSize = s.Val + }) + default: + ss = append(ss, s) + } + return nil + }) + t.controlBuf.executeAndPut(func(any) bool { + for _, f := range updateFuncs { + f() + } + return true + }, &incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() + return + } + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A reset ping strikes means that we don't need to check for policy + // violation for this ping and the pingStrikes counter should be set + // to 0. 
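+	// Illustrative strike accounting (the numbers are assumed, not taken
+	// from this file): with kep.MinTime = 5m, a client pinging every 10s
+	// earns a strike per ping; once strikes exceed maxPingStrikes (2), the
+	// ENHANCE_YOUR_CALM GOAWAY below is sent.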
+	if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) {
+		t.pingStrikes = 0
+		return
+	}
+	t.mu.Lock()
+	ns := len(t.activeStreams)
+	t.mu.Unlock()
+	if ns < 1 && !t.kep.PermitWithoutStream {
+		// Keepalive shouldn't be active; thus, this new ping should have
+		// come after at least defaultPingTimeout.
+		if t.lastPingAt.Add(defaultPingTimeout).After(now) {
+			t.pingStrikes++
+		}
+	} else {
+		// Check if keepalive policy is respected.
+		if t.lastPingAt.Add(t.kep.MinTime).After(now) {
+			t.pingStrikes++
+		}
+	}
+
+	if t.pingStrikes > maxPingStrikes {
+		// Send goaway and close the connection.
+		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
+	}
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+	t.controlBuf.put(&incomingWindowUpdate{
+		streamID:  f.Header().StreamID,
+		increment: f.Increment,
+	})
+}
+
+func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
+	for k, vv := range md {
+		if isReservedHeader(k) {
+			// Clients don't tolerate reading restricted headers after some
+			// non-restricted ones were sent.
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+		}
+	}
+	return headerFields
+}
+
+func (t *http2Server) checkForHeaderListSize(it any) bool {
+	if t.maxSendHeaderListSize == nil {
+		return true
+	}
+	hdrFrame := it.(*headerFrame)
+	var sz int64
+	for _, f := range hdrFrame.hf {
+		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			}
+			return false
+		}
+	}
+	return true
+}
+
+func (t *http2Server) streamContextErr(s *Stream) error {
+	select {
+	case <-t.done:
+		return ErrConnClosing
+	default:
+	}
+	return ContextErr(s.ctx.Err())
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+	s.hdrMu.Lock()
+	defer s.hdrMu.Unlock()
+	if s.getState() == streamDone {
+		return t.streamContextErr(s)
+	}
+
+	if s.updateHeaderSent() {
+		return ErrIllegalHeaderWrite
+	}
+
+	if md.Len() > 0 {
+		if s.header.Len() > 0 {
+			s.header = metadata.Join(s.header, md)
+		} else {
+			s.header = md
+		}
+	}
+	if err := t.writeHeaderLocked(s); err != nil {
+		return status.Convert(err).Err()
+	}
+	return nil
+}
+
+func (t *http2Server) setResetPingStrikes() {
+	atomic.StoreUint32(&t.resetPingStrikes, 1)
+}
+
+func (t *http2Server) writeHeaderLocked(s *Stream) error {
+	// TODO(mmukhi): Benchmark whether performance improves if we count the
+	// metadata and other header fields first and create a slice of exactly
+	// that size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
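+	// For illustration: for a plain proto-encoded RPC, the header block
+	// assembled below reaches the wire as
+	//
+	//	:status: 200
+	//	content-type: application/grpc
+	//	grpc-encoding: gzip        (only if a send compressor is set)
+	//
+	// followed by any application-set header metadata.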
+	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+	if s.sendCompress != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+	}
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
+	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: false,
+		onWrite:   t.setResetPingStrikes,
+	})
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	for _, sh := range t.stats {
+		// Note: Headers are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		outHeader := &stats.OutHeader{
+			Header:      s.header.Copy(),
+			Compression: s.sendCompress,
+		}
+		sh.HandleRPC(s.Context(), outHeader)
+	}
+	return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream afterwards.
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+	s.hdrMu.Lock()
+	defer s.hdrMu.Unlock()
+
+	if s.getState() == streamDone {
+		return nil
+	}
+
+	// TODO(mmukhi): Benchmark whether performance improves if we count the
+	// metadata and other header fields first and create a slice of exactly
+	// that size.
+	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
+	if !s.updateHeaderSent() { // No headers have been sent.
+		if len(s.header) > 0 { // Send a separate header frame.
+			if err := t.writeHeaderLocked(s); err != nil {
+				return err
+			}
+		} else { // Send a trailer only response.
+			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
+		}
+	}
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
+
+	if p := st.Proto(); p != nil && len(p.Details) > 0 {
+		stBytes, err := proto.Marshal(p)
+		if err != nil {
+			// TODO: return error instead, when callers are able to handle it.
+			t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
+		} else {
+			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+		}
+	}
+
+	// Attach the trailer metadata.
+	headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
+	trailingHeader := &headerFrame{
+		streamID:  s.id,
+		hf:        headerFields,
+		endStream: true,
+		onWrite:   t.setResetPingStrikes,
+	}
+
+	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+	if !success {
+		if err != nil {
+			return err
+		}
+		t.closeStream(s, true, http2.ErrCodeInternal, false)
+		return ErrHeaderListSizeLimitViolation
+	}
+	// Send a RST_STREAM after the trailers if the client has not already half-closed.
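+	// (streamActive here means the client has not yet sent END_STREAM, so
+	// the RST_STREAM tells it to stop sending; after a half-close the
+	// trailers alone terminate the stream cleanly.)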
+	rst := s.getState() == streamActive
+	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
+	for _, sh := range t.stats {
+		// Note: The trailer fields are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		sh.HandleRPC(s.Context(), &stats.OutTrailer{
+			Trailer: s.trailer.Copy(),
+		})
+	}
+	return nil
+}
+
+// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+	if !s.isHeaderSent() { // Headers haven't been written yet.
+		if err := t.WriteHeader(s, nil); err != nil {
+			return err
+		}
+	} else {
+		// Writing headers checks for this condition.
+		if s.getState() == streamDone {
+			return t.streamContextErr(s)
+		}
+	}
+	df := &dataFrame{
+		streamID:    s.id,
+		h:           hdr,
+		d:           data,
+		onEachWrite: t.setResetPingStrikes,
+	}
+	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+		return t.streamContextErr(s)
+	}
+	return t.controlBuf.put(df)
+}
+
+// keepalive running in a separate goroutine does the following:
+// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle.
+// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge.
+// 3. Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge.
+// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection
+// after an additional duration of keepalive.Timeout.
+func (t *http2Server) keepalive() {
+	p := &ping{}
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	kpTimeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	// Initialize the different timers to their default values.
+	idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+	ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+	kpTimer := time.NewTimer(t.kp.Time)
+	defer func() {
+		// We need to drain the underlying channel in these timers after a call
+		// to Stop(), only if we are interested in resetting them. Clearly we
+		// are not interested in resetting them here.
+		idleTimer.Stop()
+		ageTimer.Stop()
+		kpTimer.Stop()
+	}()
+
+	for {
+		select {
+		case <-idleTimer.C:
+			t.mu.Lock()
+			idle := t.idle
+			if idle.IsZero() { // The connection is non-idle.
+				t.mu.Unlock()
+				idleTimer.Reset(t.kp.MaxConnectionIdle)
+				continue
+			}
+			val := t.kp.MaxConnectionIdle - time.Since(idle)
+			t.mu.Unlock()
+			if val <= 0 {
+				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
+				// Gracefully close the connection.
+				t.Drain("max_idle")
+				return
+			}
+			idleTimer.Reset(val)
+		case <-ageTimer.C:
+			t.Drain("max_age")
+			ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
+			select {
+			case <-ageTimer.C:
+				// Close the connection after grace period.
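+				// Illustrative timeline (values assumed, not from this
+				// file): with MaxConnectionAge = 30s and
+				// MaxConnectionAgeGrace = 5s, a GOAWAY goes out at t=30s via
+				// Drain above, and a connection still open at t=35s is
+				// force-closed here.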
+ if t.logger.V(logLevel) { + t.logger.Infof("Closing server transport due to maximum connection age") + } + t.controlBuf.put(closeConnection{}) + case <-t.done: + } + return + case <-kpTimer.C: + lastRead := atomic.LoadInt64(&t.lastRead) + if lastRead > prevNano { + // There has been read activity since the last time we were + // here. Setup the timer to fire at kp.Time seconds from + // lastRead time and continue. + outstandingPing = false + kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano())) + prevNano = lastRead + continue + } + if outstandingPing && kpTimeoutLeft <= 0 { + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) + return + } + if !outstandingPing { + if channelz.IsOn() { + atomic.AddInt64(&t.czData.kpCount, 1) + } + t.controlBuf.put(p) + kpTimeoutLeft = t.kp.Timeout + outstandingPing = true + } + // The amount of time to sleep here is the minimum of kp.Time and + // timeoutLeft. This will ensure that we wait only for kp.Time + // before sending out the next ping (for cases where the ping is + // acked). + sleepDuration := minTime(t.kp.Time, kpTimeoutLeft) + kpTimeoutLeft -= sleepDuration + kpTimer.Reset(sleepDuration) + case <-t.done: + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close(err error) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if t.logger.V(logLevel) { + t.logger.Infof("Closing: %v", err) + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + close(t.done) + if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { + t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) + } + channelz.RemoveEntry(t.channelzID) + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + for _, sh := range t.stats { + connEnd := &stats.ConnEnd{} + sh.HandleConn(t.ctx, connEnd) + } +} + +// deleteStream deletes the stream s from transport's active streams. +func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { + + t.mu.Lock() + if _, ok := t.activeStreams[s.id]; ok { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + + if channelz.IsOn() { + if eosReceived { + atomic.AddInt64(&t.czData.streamsSucceeded, 1) + } else { + atomic.AddInt64(&t.czData.streamsFailed, 1) + } + } +} + +// finishStream closes the stream and puts the trailing headerFrame into controlbuf. +func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + oldState := s.swapState(streamDone) + if oldState == streamDone { + // If the stream was already done, return. + return + } + + hdr.cleanup = &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.deleteStream(s, eosReceived) + }, + } + t.controlBuf.put(hdr) +} + +// closeStream clears the footprint of a stream when the stream is not needed any more. 
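+// Unlike finishStream, which lets loopy write the trailers before the cleanup
+// callback runs, closeStream tears the stream down immediately and only queues
+// a cleanupStream item (optionally carrying a RST_STREAM) in controlbuf.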
+func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) { + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. + s.cancel() + + s.swapState(streamDone) + t.deleteStream(s, eosReceived) + + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() {}, + }) +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain(debugData string) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainEvent != nil { + return + } + t.drainEvent = grpcsync.NewEvent() + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.maxStreamMu.Lock() + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + t.maxStreamMu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + sid := t.maxStreamID + retErr := g.closeConn + if len(t.activeStreams) == 0 { + retErr = errors.New("second GOAWAY written and no active streams left to process") + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if retErr != nil { + return false, retErr + } + return true, nil + } + t.mu.Unlock() + t.maxStreamMu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
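+	// Sketch of the resulting frame exchange:
+	//
+	//	server -> GOAWAY(last-stream-id = MaxUint32)  // heads-up, keep accepting
+	//	server -> PING(goAwayPing)
+	//	client -> PING ack                            // or the 1-minute timer fires
+	//	server -> GOAWAY(last-stream-id = maxStreamID)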
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainEvent.Done(): + case <-timer.C: + case <-t.done: + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + s := channelz.SocketInternalMetric{ + StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), + StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), + StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), + MessagesSent: atomic.LoadInt64(&t.czData.msgSent), + MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), + KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), + LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), + LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), + LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), + LocalFlowControlWindow: int64(t.fc.getSize()), + SocketOptions: channelz.GetSocketOption(t.conn), + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // RemoteName : + } + if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + s.Security = au.GetSecurityValue() + } + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + atomic.AddInt64(&t.czData.msgSent, 1) + atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) +} + +func (t *http2Server) IncrMsgRecv() { + atomic.AddInt64(&t.czData.msgRecv, 1) + atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.done: + return -1 + case <-timer.C: + return -2 + } +} + +func (t *http2Server) getPeer() *peer.Peer { + return &peer.Peer{ + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + } +} + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := grpcrand.Int63n(2*r) - r + return time.Duration(j) +} + +type connectionKey struct{} + +// GetConnection gets the connection from the context. +func GetConnection(ctx context.Context) net.Conn { + conn, _ := ctx.Value(connectionKey{}).(net.Conn) + return conn +} + +// SetConnection adds the connection to the context to be able to get +// information about the destination ip and port for an incoming RPC. This also +// allows any unary or streaming interceptors to see the connection. +func setConnection(ctx context.Context, conn net.Conn) context.Context { + return context.WithValue(ctx, connectionKey{}, conn) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go new file mode 100644 index 0000000000..1958140082 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -0,0 +1,479 @@ +/* + * + * Copyright 2014 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "encoding/base64" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // https://httpwg.org/specs/rfc7540.html#SettingValues + http2InitHeaderTableSize = 4096 +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + // HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table. + HTTPStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } +) + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. +func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "grpc-status-details-bin", + // Intentionally exclude grpc-previous-rpc-attempts and + // grpc-retry-pushback-ms, which are "reserved", but their API + // intentionally works via metadata. 
+ "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated into metadata +// visible to users, even though it is classified as "reserved", above. +func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { + v, err := decodeBinHeader(rawDetails) + if err != nil { + return nil, err + } + st := &spb.Status{} + if err = proto.Unmarshal(v, st); err != nil { + return nil, err + } + return status.FromProto(st), nil +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + if size > 9 { + // Spec allows for 8 digits plus the unit. + return 0, fmt.Errorf("transport: timeout string is too long: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + const maxHours = math.MaxInt64 / int64(time.Hour) + if d == time.Hour && t > maxHours { + // This timeout would overflow math.MaxInt64; clamp it. + return time.Duration(math.MaxInt64), nil + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildeByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". It does percent encoding and also replaces invalid utf-8 +// characters with Unicode replacement character. +// +// It checks to see if each individual byte in msg is an allowable byte, and +// then either percent encoding or passing it through. When percent encoding, +// the byte is converted into hexadecimal notation with a '%' prepended. 
+func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c <= tildeByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var sb strings.Builder + for len(msg) > 0 { + r, size := utf8.DecodeRuneInString(msg) + for _, b := range []byte(string(r)) { + if size > 1 { + // If size > 1, r is not ascii. Always do percent encoding. + fmt.Fprintf(&sb, "%%%02X", b) + continue + } + + // The for loop is necessary even if size == 1. r could be + // utf8.RuneError. + // + // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD". + if b >= spaceByte && b <= tildeByte && b != percentByte { + sb.WriteByte(b) + } else { + fmt.Fprintf(&sb, "%%%02X", b) + } + } + msg = msg[size:] + } + return sb.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. +func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var sb strings.Builder + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + sb.WriteByte(c) + } else { + sb.WriteByte(byte(parsed)) + i += 2 + } + } else { + sb.WriteByte(c) + } + } + return sb.String() +} + +type bufWriter struct { + pool *sync.Pool + buf []byte + offset int + batchSize int + conn net.Conn + err error +} + +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ + batchSize: batchSize, + conn: conn, + pool: pool, + } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) + } + return w +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if w.batchSize == 0 { // Buffer has been disabled. 
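+		// Write straight through to the connection; toIOError tags any
+		// failure so callers can distinguish transport I/O errors from
+		// protocol errors via isIOError.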
+ n, err = w.conn.Write(b) + return n, toIOError(err) + } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.flushKeepBuffer() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.err = toIOError(w.err) + w.offset = 0 + return w.err +} + +type ioError struct { + error +} + +func (i ioError) Unwrap() error { + return i.error +} + +func isIOError(err error) bool { + return errors.As(err, &ioError{}) +} + +func toIOError(err error) error { + if err == nil { + return nil + } + return ioError{error: err} +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { + if writeBufferSize < 0 { + writeBufferSize = 0 + } + var r io.Reader = conn + if readBufferSize > 0 { + r = bufio.NewReaderSize(r, readBufferSize) + } + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + f.fr.SetMaxReadFrameSize(http2MaxFrameLen) + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.MaxHeaderListSize = maxHeaderListSize + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} + +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + +// parseDialTarget returns the network and address to pass to dialer. +func parseDialTarget(target string) (string, string) { + net := "tcp" + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + return n, target[m1+1:] + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr := t.Path + if scheme == "unix" { + if addr == "" { + addr = t.Host + } + return scheme, addr + } + } + return net, target +} diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go new file mode 100644 index 0000000000..42ed2b07af --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/logging.go @@ -0,0 +1,40 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + + "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" +) + +var logger = grpclog.Component("transport") + +func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p)) +} + +func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p)) +} + +func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger { + return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p)) +} diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go new file mode 100644 index 0000000000..c11b527827 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go @@ -0,0 +1,46 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package networktype declares the network type to be used in the default +// dialer. Attribute of a resolver.Address. +package networktype + +import ( + "google.golang.org/grpc/resolver" +) + +// keyType is the key to use for storing State in Attributes. +type keyType string + +const key = keyType("grpc.internal.transport.networktype") + +// Set returns a copy of the provided address with attributes containing networkType. +func Set(address resolver.Address, networkType string) resolver.Address { + address.Attributes = address.Attributes.WithValue(key, networkType) + return address +} + +// Get returns the network type in the resolver.Address and true, or "", false +// if not present. +func Get(address resolver.Address) (string, bool) { + v := address.Attributes.Value(key) + if v == nil { + return "", false + } + return v.(string), true +} diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go new file mode 100644 index 0000000000..4159619878 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -0,0 +1,142 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "context" + "encoding/base64" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" +) + +const proxyAuthHeaderKey = "Proxy-Authorization" + +var ( + // The following variable will be overwritten in the tests. + httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(address string) (*url.URL, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return nil, err + } + return url, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. +type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := &http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: backendAddr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + } + if t := proxyURL.User; t != nil { + u := t.Username() + p, _ := t.Password() + req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) + } + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy +// is necessary, dials, does the HTTP CONNECT handshake, and returns the +// connection. +func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { + newAddr := addr + proxyURL, err := mapAddress(addr) + if err != nil { + return nil, err + } + if proxyURL != nil { + newAddr = proxyURL.Host + } + + conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + if err != nil { + return + } + if proxyURL != nil { + // proxy is disabled if proxyURL is nil. 
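+		// The handshake writes a plain-text request of the form
+		//
+		//	CONNECT backend.example.com:443 HTTP/1.1
+		//	Host: backend.example.com:443
+		//	User-Agent: <grpcUA>
+		//
+		// (the host shown is illustrative) and requires a 200 response
+		// before gRPC traffic flows over conn.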
+ conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + } + return +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go new file mode 100644 index 0000000000..74a811fc05 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -0,0 +1,837 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const logLevel = 2 + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPool() *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, + }, + } +} + +func (p *bufferPool) get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +func (p *bufferPool) put(b *bytes.Buffer) { + p.pool.Put(b) +} + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + buffer *bytes.Buffer + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// +// Note: recvBuffer differs from buffer.Unbounded only in the fact that it +// holds a channel of recvMsg structs instead of objects implementing "item" +// interface. recvBuffer is written to much more often and using strict recvMsg +// structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. 
+		return
+	}
+	b.err = r.err
+	if len(b.backlog) == 0 {
+		select {
+		case b.c <- r:
+			b.mu.Unlock()
+			return
+		default:
+		}
+	}
+	b.backlog = append(b.backlog, r)
+	b.mu.Unlock()
+}
+
+func (b *recvBuffer) load() {
+	b.mu.Lock()
+	if len(b.backlog) > 0 {
+		select {
+		case b.c <- b.backlog[0]:
+			b.backlog[0] = recvMsg{}
+			b.backlog = b.backlog[1:]
+		default:
+		}
+	}
+	b.mu.Unlock()
+}
+
+// get returns the channel that receives a recvMsg in the buffer.
+//
+// Upon receipt of a recvMsg, the caller should call load to send another
+// recvMsg onto the channel if there is any.
+func (b *recvBuffer) get() <-chan recvMsg {
+	return b.c
+}
+
+// recvBufferReader implements the io.Reader interface to read data from
+// recvBuffer.
+type recvBufferReader struct {
+	closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
+	ctx         context.Context
+	ctxDone     <-chan struct{} // cache of ctx.Done() (for performance).
+	recv        *recvBuffer
+	last        *bytes.Buffer // Stores the remaining data in the previous calls.
+	err         error
+	freeBuffer  func(*bytes.Buffer)
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data
+// available in recv. If Read returns any non-nil error, it will continue to
+// return that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	if r.last != nil {
+		// Read remaining data left in last call.
+		copied, _ := r.last.Read(p)
+		if r.last.Len() == 0 {
+			r.freeBuffer(r.last)
+			r.last = nil
+		}
+		return copied, nil
+	}
+	if r.closeStream != nil {
+		n, r.err = r.readClient(p)
+	} else {
+		n, r.err = r.read(p)
+	}
+	return n, r.err
+}
+
+func (r *recvBufferReader) read(p []byte) (n int, err error) {
+	select {
+	case <-r.ctxDone:
+		return 0, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readAdditional(m, p)
+	}
+}
+
+func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+	// If the context is canceled, then closes the stream with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
+		r.closeStream(ContextErr(r.ctx.Err()))
+		m := <-r.recv.get()
+		return r.readAdditional(m, p)
+	case m := <-r.recv.get():
+		return r.readAdditional(m, p)
+	}
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+	r.recv.load()
+	if m.err != nil {
+		return 0, m.err
+	}
+	copied, _ := m.buffer.Read(p)
+	if m.buffer.Len() == 0 {
+		r.freeBuffer(m.buffer)
+		r.last = nil
+	} else {
+		r.last = m.buffer
+	}
+	return copied, nil
+}
+
+type streamState uint32
+
+const (
+	streamActive    streamState = iota
+	streamWriteDone // EndStream sent
+	streamReadDone  // EndStream received
+	streamDone      // the entire stream is finished.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+	id           uint32
+	st           ServerTransport    // nil for client side Stream
+	ct           *http2Client       // nil for server side Stream
+	ctx          context.Context    // the associated context of the stream
+	cancel       context.CancelFunc // always nil for client side Stream
+	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
+	doneFunc     func()             // invoked at the end of stream on client side.
+	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
+	method       string             // the associated RPC method of the stream
+	recvCompress string
+	sendCompress string
+	buf          *recvBuffer
+	trReader     io.Reader
+	fc           *inFlow
+	wq           *writeQuota
+
+	// Holds compressor names passed in grpc-accept-encoding metadata from the
+	// client. This is empty for the client side stream.
+	clientAdvertisedCompressors string
+	// Callback to state application's intentions to read data. This
+	// is used to adjust flow control, if needed.
+	requestRead func(int)
+
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received. Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value). Not valid on server side.
+	headerValid bool
+
+	// hdrMu protects header and trailer metadata on the server-side.
+	hdrMu sync.Mutex
+	// On client side, header keeps the received header metadata.
+	//
+	// On server side, header keeps the header set by SetHeader(). The complete
+	// header will be merged into this after t.WriteHeader() is called.
+	header  metadata.MD
+	trailer metadata.MD // the key-value map of trailer metadata.
+
+	noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
+	headerSent uint32
+
+	state streamState
+
+	// On client-side it is the status error received from the server.
+	// On server-side it is unused.
+	status *status.Status
+
+	bytesReceived uint32 // indicates whether any bytes have been received on this stream
+	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
+
+	// contentSubtype is the content-subtype for requests.
+	// this must be lowercase or the behavior is undefined.
+	contentSubtype string
+}
+
+// isHeaderSent is only valid on the server-side.
+func (s *Stream) isHeaderSent() bool {
+	return atomic.LoadUint32(&s.headerSent) == 1
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set. It is valid only on server-side.
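+// (The atomic swap guarantees exactly one caller observes false, so at most
+// one header frame is written per stream.)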
+func (s *Stream) updateHeaderSent() bool {
+	return atomic.SwapUint32(&s.headerSent, 1) == 1
+}
+
+func (s *Stream) swapState(st streamState) streamState {
+	return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
+}
+
+func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
+	return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
+}
+
+func (s *Stream) getState() streamState {
+	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+}
+
+func (s *Stream) waitOnHeader() {
+	if s.headerChan == nil {
+		// On the server headerChan is always nil since a stream originates
+		// only after having received headers.
+		return
+	}
+	select {
+	case <-s.ctx.Done():
+		// Close the stream to prevent headers/trailers from changing after
+		// this function returns.
+		s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
+		// headerChan could possibly not be closed yet if closeStream raced
+		// with operateHeaders; wait until it is closed explicitly here.
+		<-s.headerChan
+	case <-s.headerChan:
+	}
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is an empty string if there is no compression applied.
+func (s *Stream) RecvCompress() string {
+	s.waitOnHeader()
+	return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm for the stream.
+func (s *Stream) SetSendCompress(name string) error {
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return errors.New("transport: set send compressor called after headers sent or stream done")
+	}
+
+	s.sendCompress = name
+	return nil
+}
+
+// SendCompress returns the send compressor name.
+func (s *Stream) SendCompress() string {
+	return s.sendCompress
+}
+
+// ClientAdvertisedCompressors returns the compressor names advertised by the
+// client via the grpc-accept-encoding header.
+func (s *Stream) ClientAdvertisedCompressors() string {
+	return s.clientAdvertisedCompressors
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *Stream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Header returns the header metadata of the stream.
+//
+// On client side, it acquires the key-value pairs of header metadata once it is
+// available. It blocks until i) the metadata is ready or ii) there is no header
+// metadata or iii) the stream is canceled/expired.
+//
+// On server side, it returns the out header after t.WriteHeader is called. It
+// does not block and must not be called until after WriteHeader.
+func (s *Stream) Header() (metadata.MD, error) {
+	if s.headerChan == nil {
+		// On server side, return the header in stream. It will be the out
+		// header after t.WriteHeader is called.
+		return s.header.Copy(), nil
+	}
+	s.waitOnHeader()
+
+	if !s.headerValid || s.noHeaders {
+		return nil, s.status.Err()
+	}
+
+	return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, it returns true. Client-side only.
+func (s *Stream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client
+// side only.
+// It can be safely read only after the stream has ended, that is, after either
+// Read or Write has returned io.EOF.
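+//
+// Illustrative sketch (an editorial addition, not part of the upstream file):
+//
+//	<-s.Done()        // wait for the final status from the server
+//	md := s.Trailer() // the trailer metadata is now safe to read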
+func (s *Stream) Trailer() metadata.MD {
+	c := s.trailer.Copy()
+	return c
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *Stream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+	return s.ctx
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+	return s.method
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *Stream) Status() *status.Status {
+	return s.status
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SendHeader sends the given header metadata. The given metadata is
+// combined with any metadata set by previous calls to SetHeader and
+// then written to the transport stream.
+func (s *Stream) SendHeader(md metadata.MD) error {
+	return s.st.WriteHeader(s, md)
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times. Server side only.
+// This should not be called in parallel to other data writes.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+	s.buf.put(m)
+}
+
+// Read reads all p bytes from the wire for this stream.
+func (s *Stream) Read(p []byte) (n int, err error) {
+	// Don't request a read if there was an error earlier
+	if er := s.trReader.(*transportReader).er; er != nil {
+		return 0, er
+	}
+	s.requestRead(len(p))
+	return io.ReadFull(s.trReader, p)
+}
+
+// transportReader reads all the data available for this Stream from the
+// transport and passes them into the decoder, which converts them into a gRPC
+// message stream. The error is io.EOF when the stream is done or another
+// non-nil error if the stream broke.
+type transportReader struct {
+	reader io.Reader
+	// The handler to control the window update procedure for both this
+	// particular stream and the associated transport.
+	windowHandler func(int)
+	er            error
+}
+
+func (t *transportReader) Read(p []byte) (n int, err error) {
+	n, err = t.reader.Read(p)
+	if err != nil {
+		t.er = err
+		return
+	}
+	t.windowHandler(n)
+	return
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *Stream) BytesReceived() bool {
+	return atomic.LoadUint32(&s.bytesReceived) == 1
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *Stream) Unprocessed() bool {
+	return atomic.LoadUint32(&s.unprocessed) == 1
+}
+
+// GoString is implemented by Stream so context.String() won't
+// race when printing %#v.
+func (s *Stream) GoString() string {
+	return fmt.Sprintf("<stream: %p, %v>", s, s.method)
+}
+
+// state of transport
+type transportState int
+
+const (
+	reachable transportState = iota
+	closing
+	draining
+)
+
+// ServerConfig consists of all the configurations to establish a server transport.
+type ServerConfig struct {
+	MaxStreams            uint32
+	ConnectionTimeout     time.Duration
+	Credentials           credentials.TransportCredentials
+	InTapHandle           tap.ServerInHandle
+	StatsHandlers         []stats.Handler
+	KeepaliveParams       keepalive.ServerParameters
+	KeepalivePolicy       keepalive.EnforcementPolicy
+	InitialWindowSize     int32
+	InitialConnWindowSize int32
+	WriteBufferSize       int
+	ReadBufferSize        int
+	SharedWriteBuffer     bool
+	ChannelzParentID      *channelz.Identifier
+	MaxHeaderListSize     *uint32
+	HeaderTableSize       *uint32
+}
+
+// ConnectOptions covers all relevant options for communicating with the server.
+type ConnectOptions struct {
+	// UserAgent is the application user agent.
+	UserAgent string
+	// Dialer specifies how to dial a network address.
+	Dialer func(context.Context, string) (net.Conn, error)
+	// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
+	FailOnNonTempDialError bool
+	// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
+	PerRPCCredentials []credentials.PerRPCCredentials
+	// TransportCredentials stores the Authenticator required to setup a client
+	// connection. Only one of TransportCredentials and CredsBundle is non-nil.
+	TransportCredentials credentials.TransportCredentials
+	// CredsBundle is the credentials bundle to be used. Only one of
+	// TransportCredentials and CredsBundle is non-nil.
+	CredsBundle credentials.Bundle
+	// KeepaliveParams stores the keepalive parameters.
+	KeepaliveParams keepalive.ClientParameters
+	// StatsHandlers stores the handlers for stats.
+	StatsHandlers []stats.Handler
+	// InitialWindowSize sets the initial window size for a stream.
+	InitialWindowSize int32
+	// InitialConnWindowSize sets the initial window size for a connection.
+	InitialConnWindowSize int32
+	// WriteBufferSize sets the size of the write buffer, which in turn
+	// determines how much data can be batched before it is written on the wire.
+	WriteBufferSize int
+	// ReadBufferSize sets the size of the read buffer, which in turn determines
+	// how much data can be read at most for one read syscall.
+	ReadBufferSize int
+	// SharedWriteBuffer indicates whether connections should reuse the write
+	// buffer.
+	SharedWriteBuffer bool
+	// ChannelzParentID sets the addrConn id which initiated the creation of
+	// this client transport.
+	ChannelzParentID *channelz.Identifier
+	// MaxHeaderListSize sets the max (uncompressed) size of header list that is
+	// prepared to be received.
+	MaxHeaderListSize *uint32
+	// UseProxy specifies if a proxy should be used.
+	UseProxy bool
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) {
+	return newHTTP2Client(connectCtx, ctx, addr, opts, onClose)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+	// Last indicates whether this write is the last piece for
+	// this stream.
+	Last bool
+}
+
+// CallHdr carries the information of a particular RPC.
+type CallHdr struct {
+	// Host specifies the peer's host.
+	Host string
+
+	// Method specifies the operation to perform.
+	Method string
+
+	// SendCompress specifies the compression algorithm applied on
+	// outbound message.
+	SendCompress string
+
+	// Creds specifies credentials.PerRPCCredentials for a call.
+	Creds credentials.PerRPCCredentials
+
+	// ContentSubtype specifies the content-subtype for a request. For example, a
+	// content-subtype of "proto" will result in a content-type of
+	// "application/grpc+proto". The value of ContentSubtype must be all
+	// lowercase, otherwise the behavior is undefined. See
+	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+	// for more details.
+	ContentSubtype string
+
+	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+
+	DoneFunc func() // called when the stream is finished
+}
+
+// ClientTransport is the common interface for all gRPC client-side transport
+// implementations.
+type ClientTransport interface {
+	// Close tears down this transport. Once it returns, the transport
+	// should not be accessed any more. The caller must make sure this
+	// is called only once.
+	Close(err error)
+
+	// GracefulClose starts to tear down the transport: the transport will stop
+	// accepting new RPCs and NewStream will return an error. Once all streams
+	// are finished, the transport will close.
+	//
+	// It does not block.
+	GracefulClose()
+
+	// Write sends the data for the given stream. A nil stream indicates
+	// the write is to be performed on the transport as a whole.
+	Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+
+	// NewStream creates a Stream for an RPC.
+	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+	// CloseStream clears the footprint of a stream when the stream is
+	// not needed any more. The err indicates the error incurred when
+	// CloseStream is called. Must be called when a stream is finished
+	// unless the associated transport is closing.
+	CloseStream(stream *Stream, err error)
+
+	// Error returns a channel that is closed when some I/O error
+	// happens. Typically the caller should have a goroutine to monitor
+	// this in order to take action (e.g., close the current transport
+	// and create a new one) in the error case. It should not return nil
+	// once the transport is initiated.
+	Error() <-chan struct{}
+
+	// GoAway returns a channel that is closed when ClientTransport
+	// receives the draining signal from the server (e.g., GOAWAY frame in
+	// HTTP/2).
+	GoAway() <-chan struct{}
+
+	// GetGoAwayReason returns the reason why the GoAway frame was received,
+	// along with a human readable string with debug info.
+	GetGoAwayReason() (GoAwayReason, string)
+
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this
+	// transport.
+	IncrMsgRecv()
+}
+
+// ServerTransport is the common interface for all gRPC server-side transport
+// implementations.
+//
+// Methods may be called concurrently from multiple goroutines, but
+// Write methods for a given Stream will be called serially.
+type ServerTransport interface {
+	// HandleStreams receives incoming streams using the given handler.
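+	// (Editorial note: the second argument derives a per-stream context;
+	// upstream names it traceCtx and uses it to attach tracing information
+	// to the stream's context.)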
+	HandleStreams(func(*Stream), func(context.Context, string) context.Context)
+
+	// WriteHeader sends the header metadata for the given stream.
+	// WriteHeader may not be called on all streams.
+	WriteHeader(s *Stream, md metadata.MD) error
+
+	// Write sends the data for the given stream.
+	// Write may not be called on all streams.
+	Write(s *Stream, hdr []byte, data []byte, opts *Options) error
+
+	// WriteStatus sends the status of a stream to the client. WriteStatus is
+	// the final call made on a stream and always occurs.
+	WriteStatus(s *Stream, st *status.Status) error
+
+	// Close tears down the transport. Once it is called, the transport
+	// should not be accessed any more. All the pending streams and their
+	// handlers will be terminated asynchronously.
+	Close(err error)
+
+	// RemoteAddr returns the remote network address.
+	RemoteAddr() net.Addr
+
+	// Drain notifies the client this ServerTransport stops accepting new RPCs.
+	Drain(debugData string)
+
+	// IncrMsgSent increments the number of messages sent through this transport.
+	IncrMsgSent()
+
+	// IncrMsgRecv increments the number of messages received through this
+	// transport.
+	IncrMsgRecv()
+}
+
+// connectionErrorf creates a ConnectionError with the specified error description.
+func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError {
+	return ConnectionError{
+		Desc: fmt.Sprintf(format, a...),
+		temp: temp,
+		err:  e,
+	}
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+	Desc string
+	temp bool
+	err  error
+}
+
+func (e ConnectionError) Error() string {
+	return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// Temporary indicates if this connection error is temporary or fatal.
+func (e ConnectionError) Temporary() bool {
+	return e.temp
+}
+
+// Origin returns the original error of this connection error.
+func (e ConnectionError) Origin() error {
+	// Never return nil error here.
+	// If the original error is nil, return itself.
+	if e.err == nil {
+		return e
+	}
+	return e.err
+}
+
+// Unwrap returns the original error of this connection error or nil when the
+// origin is nil.
+func (e ConnectionError) Unwrap() error {
+	return e.err
+}
+
+var (
+	// ErrConnClosing indicates that the transport is closing.
+	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
+	// errStreamDrain indicates that the stream is rejected because the
+	// connection is draining. This could be caused by goaway or balancer
+	// removing the address.
+	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
+	// errStreamDone is returned from write at the client side to indicate to
+	// the application layer that the stream is done.
+	errStreamDone = errors.New("the stream is done")
+	// statusGoAway indicates that the server sent a GOAWAY that included this
+	// stream's ID in unprocessed RPCs.
+	statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+)
+
+// GoAwayReason contains the reason for the GoAway frame received.
+type GoAwayReason uint8
+
+const (
+	// GoAwayInvalid indicates that no GoAway frame is received.
+	GoAwayInvalid GoAwayReason = 0
+	// GoAwayNoReason is the default value when GoAway frame is received.
+	GoAwayNoReason GoAwayReason = 1
+	// GoAwayTooManyPings indicates that a GoAway frame with
+	// ErrCodeEnhanceYourCalm was received and that the debug data said
+	// "too_many_pings".
+	GoAwayTooManyPings GoAwayReason = 2
+)
+
+// channelzData is used to store channelz related data for http2Client and http2Server.
+// These fields cannot be embedded in the original structs (e.g. http2Client), since to
+// perform atomic operations on an int64 variable on a 32-bit machine, the user is
+// responsible for enforcing memory alignment. Here, by grouping those int64 fields
+// inside a struct, we are enforcing the alignment.
+type channelzData struct {
+	kpCount int64
+	// The number of streams that have started, including already finished ones.
+	streamsStarted int64
+	// Client side: The number of streams that have ended successfully by receiving
+	// a frame with the EoS bit set from the server.
+	// Server side: The number of streams that have ended successfully by sending
+	// a frame with the EoS bit set.
+	streamsSucceeded int64
+	streamsFailed    int64
+	// lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
+	// instead of time.Time since it's more costly to atomically update a time.Time variable than an int64
+	// variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
+	lastStreamCreatedTime int64
+	msgSent               int64
+	msgRecv               int64
+	lastMsgSentTime       int64
+	lastMsgRecvTime       int64
+}
+
+// ContextErr converts the error from the context package into a status error.
+func ContextErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Error(codes.Canceled, err.Error())
+	}
+	return status.Errorf(codes.Internal, "Unexpected error from context package: %v", err)
+}
diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
new file mode 100644
index 0000000000..e8b492774d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package internal
+
+import (
+	"google.golang.org/grpc/attributes"
+	"google.golang.org/grpc/resolver"
+)
+
+// handshakeClusterNameKey is the type used as the key to store cluster name in
+// the Attributes field of resolver.Address.
+type handshakeClusterNameKey struct{}
+
+// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field
+// is updated with the cluster name.
+func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName)
+	return addr
+}
+
+// GetXDSHandshakeClusterName returns the cluster name stored in attr.
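+//
+// Illustrative usage (an editorial sketch; "my-cluster" is a made-up name):
+//
+//	addr = internal.SetXDSHandshakeClusterName(addr, "my-cluster")
+//	name, ok := internal.GetXDSHandshakeClusterName(addr.Attributes) // "my-cluster", true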
+func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { + v := attr.Value(handshakeClusterNameKey{}) + name, ok := v.(string) + return name, ok +} diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 0000000000..34d31b5e7d --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point +// healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a +// connection is broken and send pings so intermediaries will be aware of the +// liveness of the connection. Make sure these parameters are set in +// coordination with the keepalive policy on the server, as incompatible +// settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. + // If set below 10s, a minimum value of 10s will be used instead. + Time time.Duration // The current default value is infinity. + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the +// server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an + // idle connection would be closed by sending a GoAway. Idleness duration is + // defined since the most recent time the number of outstanding RPCs became + // zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a + // connection may exist before it will be closed by sending a GoAway. A + // random jitter of +/-10% will be added to MaxConnectionAge to spread out + // connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectionAgeGrace is an additive period after MaxConnectionAge after + // which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it + // pings the client to see if the transport is still alive. 
+	// If set below 1s, a minimum value of 1s will be used instead.
+	Time time.Duration // The current default value is 2 hours.
+	// After having pinged for keepalive check, the server waits for a duration
+	// of Timeout and if no activity is seen even after that the connection is
+	// closed.
+	Timeout time.Duration // The current default value is 20 seconds.
+}
+
+// EnforcementPolicy is used to set keepalive enforcement policy on the
+// server-side. Server will close connection with a client that violates this
+// policy.
+type EnforcementPolicy struct {
+	// MinTime is the minimum amount of time a client should wait before sending
+	// a keepalive ping.
+	MinTime time.Duration // The current default value is 5 minutes.
+	// If true, server allows keepalive pings even when there are no active
+	// streams (RPCs). If false, and client sends ping when there are no active
+	// streams, server will send GOAWAY and close the connection.
+	PermitWithoutStream bool // false by default.
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 0000000000..a2cdcaf12a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,295 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the
+// gRPC library. Please refer to
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+// for more information about custom-metadata.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+	"context"
+	"fmt"
+	"strings"
+)
+
+// DecodeKeyValue returns k, v, nil.
+//
+// Deprecated: use k and v directly instead.
+func DecodeKeyValue(k, v string) (string, string, error) {
+	return k, v, nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates an MD from a given key-value map.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
+//
+// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
+// result in errors if set in metadata.
+func New(m map[string]string) MD {
+	md := make(MD, len(m))
+	for k, val := range m {
+		key := strings.ToLower(k)
+		md[key] = append(md[key], val)
+	}
+	return md
+}
+
+// Pairs returns an MD formed by the mapping of key, value ...
+// Pairs panics if len(kv) is odd.
+//
+// Only the following ASCII characters are allowed in keys:
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
+// Uppercase letters are automatically converted to lowercase.
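+//
+// For example (an illustrative editorial sketch):
+//
+//	md := metadata.Pairs("K1", "v1", "k1", "v2") // md["k1"] == []string{"v1", "v2"}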
+// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := make(MD, len(kv)/2) + for i := 0; i < len(kv); i += 2 { + key := strings.ToLower(kv[i]) + md[key] = append(md[key], kv[i+1]) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + out := make(MD, len(md)) + for k, v := range md { + out[k] = copyOf(v) + } + return out +} + +// Get obtains the values for a given key. +// +// k is converted to lowercase before searching in md. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +// +// k is converted to lowercase before storing in md. +func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at +// that key. +// +// k is converted to lowercase before storing in md. +func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Delete removes the values for a given key k which is converted to lowercase +// before removing it from md. +func (md MD) Delete(k string) { + k = strings.ToLower(k) + delete(md, k) +} + +// Join joins any number of mds into a single MD. +// +// The order of values for each key is determined by the order in which the mds +// containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. Please refer to the documentation +// of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + kvCopy := make([]string, 0, len(kv)) + for i := 0; i < len(kv); i += 2 { + kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1]) + } + added[len(added)-1] = kvCopy + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. 
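+//
+// Illustrative sketch (an editorial addition; "authorization" is just an
+// example key, not part of the upstream file):
+//
+//	if md, ok := metadata.FromIncomingContext(ctx); ok {
+//		vals := md.Get("authorization") // keys are always lowercase
+//		_ = vals
+//	}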
+func FromIncomingContext(ctx context.Context) (MD, bool) { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil, false + } + out := make(MD, len(md)) + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + return out, true +} + +// ValueFromIncomingContext returns the metadata value corresponding to the metadata +// key from the incoming metadata if it exists. Key must be lower-case. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ValueFromIncomingContext(ctx context.Context, key string) []string { + md, ok := ctx.Value(mdIncomingKey{}).(MD) + if !ok { + return nil + } + + if v, ok := md[key]; ok { + return copyOf(v) + } + for k, v := range md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + if strings.ToLower(k) == key { + return copyOf(v) + } + } + return nil +} + +// the returned slice must not be modified in place +func copyOf(v []string) []string { + vals := make([]string, len(v)) + copy(vals, v) + return vals +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// +// Remember to perform strings.ToLower on the keys, for both the returned MD (MD +// is a map, there's no guarantee it's created using our helper functions) and +// the extra kv pairs (AppendToOutgoingContext doesn't turn them into +// lowercase). +// +// This is intended for gRPC-internal use ONLY. Users should use +// FromOutgoingContext instead. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. +// +// All keys in the returned MD are lowercase. +func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mdSize := len(raw.md) + for i := range raw.added { + mdSize += len(raw.added[i]) / 2 + } + + out := make(MD, mdSize) + for k, v := range raw.md { + // We need to manually convert all keys to lower case, because MD is a + // map, and there's no guarantee that the MD attached to the context is + // created using our helper functions. + key := strings.ToLower(k) + out[key] = copyOf(v) + } + for _, added := range raw.added { + if len(added)%2 == 1 { + panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added))) + } + + for i := 0; i < len(added); i += 2 { + key := strings.ToLower(added[i]) + out[key] = append(out[key], added[i+1]) + } + } + return out, ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 0000000000..e01d219ffb --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package peer defines various peer information associated with RPCs and
+// corresponding utils.
+package peer
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// Peer contains the information of the peer for an RPC, such as the address
+// and authentication information.
+type Peer struct {
+	// Addr is the peer address.
+	Addr net.Addr
+	// AuthInfo is the authentication information of the transport.
+	// It is nil if there is no transport security being used.
+	AuthInfo credentials.AuthInfo
+}
+
+type peerKey struct{}
+
+// NewContext creates a new context with peer information attached.
+func NewContext(ctx context.Context, p *Peer) context.Context {
+	return context.WithValue(ctx, peerKey{}, p)
+}
+
+// FromContext returns the peer information in ctx if it exists.
+func FromContext(ctx context.Context) (p *Peer, ok bool) {
+	p, ok = ctx.Value(peerKey{}).(*Peer)
+	return
+}
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
new file mode 100644
index 0000000000..236837f415
--- /dev/null
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -0,0 +1,236 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"io"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/channelz"
+	istatus "google.golang.org/grpc/internal/status"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/stats"
+	"google.golang.org/grpc/status"
+)
+
+// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
+// actions and unblocks when there's a picker update.
+type pickerWrapper struct {
+	mu            sync.Mutex
+	done          bool
+	idle          bool
+	blockingCh    chan struct{}
+	picker        balancer.Picker
+	statsHandlers []stats.Handler // to record blocking picker calls
+}
+
+func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
+	return &pickerWrapper{
+		blockingCh:    make(chan struct{}),
+		statsHandlers: statsHandlers,
+	}
+}
+
+// updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+	pw.mu.Lock()
+	if pw.done || pw.idle {
+		// There is a small window where a picker update from the LB policy can
+		// race with the channel going to idle mode. If the picker is idle here,
+		// it is because the channel asked it to do so, and therefore it is safe
+		// to ignore the update from the LB policy.
+		pw.mu.Unlock()
+		return
+	}
+	pw.picker = p
+	// pw.blockingCh should never be nil.
+	close(pw.blockingCh)
+	pw.blockingCh = make(chan struct{})
+	pw.mu.Unlock()
+}
+
+// doneChannelzWrapper performs the following:
+//   - increments the calls started channelz counter
+//   - wraps the done function in the passed in result to increment the calls
+//     failed or calls succeeded channelz counter before invoking the actual
+//     done function.
+func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
+	ac := acbw.ac
+	ac.incrCallsStarted()
+	done := result.Done
+	result.Done = func(b balancer.DoneInfo) {
+		if b.Err != nil && b.Err != io.EOF {
+			ac.incrCallsFailed()
+		} else {
+			ac.incrCallsSucceeded()
+		}
+		if done != nil {
+			done(b)
+		}
+	}
+}
+
+// pick returns the transport that will be used for the RPC.
+// It may block in the following cases:
+// - there's no picker
+// - the current picker returns ErrNoSubConnAvailable
+// - the current picker returns other errors and failfast is false.
+// - the subConn returned by the current picker is not READY
+// When one of these situations happens, pick blocks until the picker gets updated.
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) {
+	var ch chan struct{}
+
+	var lastPickErr error
+
+	for {
+		pw.mu.Lock()
+		if pw.done {
+			pw.mu.Unlock()
+			return nil, balancer.PickResult{}, ErrClientConnClosing
+		}
+
+		if pw.picker == nil {
+			ch = pw.blockingCh
+		}
+		if ch == pw.blockingCh {
+			// This could happen when either:
+			// - pw.picker is nil (the previous if condition), or
+			// - this function has already called pick on the current picker.
+			pw.mu.Unlock()
+			select {
+			case <-ctx.Done():
+				var errStr string
+				if lastPickErr != nil {
+					errStr = "latest balancer error: " + lastPickErr.Error()
+				} else {
+					errStr = ctx.Err().Error()
+				}
+				switch ctx.Err() {
+				case context.DeadlineExceeded:
+					return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr)
+				case context.Canceled:
+					return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr)
+				}
+			case <-ch:
+			}
+			continue
+		}
+
+		// If the channel is set, it means that the pick call had to wait for a
+		// new picker at some point. Either it's the first iteration and this
+		// function received the first picker, or a picker errored with
+		// ErrNoSubConnAvailable or errored with failfast set to false, which
+		// will trigger a continue to the next iteration. In the first case this
+		// conditional will hit if this call had to block (the channel is set).
+		// In the second case, the only way it will get to this conditional is
+		// if there is a new picker.
+		if ch != nil {
+			for _, sh := range pw.statsHandlers {
+				sh.HandleRPC(ctx, &stats.PickerUpdated{})
+			}
+		}
+
+		ch = pw.blockingCh
+		p := pw.picker
+		pw.mu.Unlock()
+
+		pickResult, err := p.Pick(info)
+		if err != nil {
+			if err == balancer.ErrNoSubConnAvailable {
+				continue
+			}
+			if st, ok := status.FromError(err); ok {
+				// Status error: end the RPC unconditionally with this status.
+				// First restrict the code to the list allowed by gRFC A54.
+				if istatus.IsRestrictedControlPlaneCode(st) {
+					err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
+				}
+				return nil, balancer.PickResult{}, dropError{error: err}
+			}
+			// For all other errors, wait-for-ready RPCs should block, and other
+			// RPCs should fail with Unavailable.
+ if !failfast { + lastPickErr = err + continue + } + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) + } + + acbw, ok := pickResult.SubConn.(*acBalancerWrapper) + if !ok { + logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn) + continue + } + if t := acbw.ac.getReadyTransport(); t != nil { + if channelz.IsOn() { + doneChannelzWrapper(acbw, &pickResult) + return t, pickResult, nil + } + return t, pickResult, nil + } + if pickResult.Done != nil { + // Calling done with nil error, no bytes sent and no bytes received. + // DoneInfo with default value works. + pickResult.Done(balancer.DoneInfo{}) + } + logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (pw *pickerWrapper) close() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.done = true + close(pw.blockingCh) +} + +func (pw *pickerWrapper) enterIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.idle = true +} + +func (pw *pickerWrapper) exitIdleMode() { + pw.mu.Lock() + defer pw.mu.Unlock() + if pw.done { + return + } + pw.blockingCh = make(chan struct{}) + pw.idle = false +} + +// dropError is a wrapper error that indicates the LB policy wishes to drop the +// RPC and not retry it. +type dropError struct { + error +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 0000000000..2e9cf66b4a --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,263 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "errors" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +const ( + // PickFirstBalancerName is the name of the pick_first balancer. 
+ PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pfConfig struct { + serviceconfig.LoadBalancingConfig `json:"-"` + + // If set to true, instructs the LB policy to shuffle the order of the list + // of addresses received from the name resolver before attempting to + // connect to them. + ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + if !envconfig.PickFirstLBConfig { + // Prior to supporting loadbalancing configuration, the pick_first LB + // policy did not implement the balancer.ConfigParser interface. This + // meant that if a non-empty configuration was passed to it, the service + // config unmarshaling code would throw a warning log, but would + // continue using the pick_first LB policy. The code below ensures the + // same behavior is retained if the env var is not set. + if string(js) != "{}" { + logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) + } + return nil, nil + } + + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + +type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn +} + +func (b *pickfirstBalancer) ResolverError(err error) { + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) + } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) +} + +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() + b.subConn = nil + } + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } + + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) 
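+		// (Editorial note: the append above copies the slice, so the
+		// shuffle below does not mutate the resolver's original list.)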
+ grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, addrs) + return nil + } + + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) + if err != nil { + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) + } + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState + } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + b.subConn.Connect() + return nil +} + +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) + } + if b.subConn != subConn { + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") + } + return + } + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil + return + } + + switch state.ConnectivityState { + case connectivity.Ready: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) + case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) + case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. + b.subConn.Connect() + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) + case connectivity.TransientFailure: + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, + }) + } + b.state = state.ConnectivityState +} + +func (b *pickfirstBalancer) Close() { +} + +func (b *pickfirstBalancer) ExitIdle() { + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() + } +} + +type picker struct { + result balancer.PickResult + err error +} + +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + return p.result, p.err +} + +// idlePicker is used when the SubConn is IDLE and kicks the SubConn into +// CONNECTING when Pick is called. 
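+//
+// (Editorial note: kicking the SubConn into CONNECTING from the pick itself
+// and returning ErrNoSubConnAvailable matches the IDLE handling noted above
+// with "See A62"; the RPC then blocks until a new picker is produced.)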
+type idlePicker struct {
+	subConn balancer.SubConn
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	i.subConn.Connect()
+	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
+func init() {
+	balancer.Register(newPickfirstBuilder())
+}
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
new file mode 100644
index 0000000000..73bd633643
--- /dev/null
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// PreparedMsg is responsible for creating a marshalled and compressed object.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type PreparedMsg struct {
+	// Struct for preparing the msg before sending it
+	encodedData []byte
+	hdr         []byte
+	payload     []byte
+}
+
+// Encode marshals and compresses the message using the codec and compressor
+// for the stream.
+func (p *PreparedMsg) Encode(s Stream, msg any) error {
+	ctx := s.Context()
+	rpcInfo, ok := rpcInfoFromContext(ctx)
+	if !ok {
+		return status.Errorf(codes.Internal, "grpc: unable to get rpcInfo")
+	}
+
+	// check if the context has the relevant information to prepareMsg
+	if rpcInfo.preloaderInfo == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
+	}
+	if rpcInfo.preloaderInfo.codec == nil {
+		return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
+	}
+
+	// prepare the msg
+	data, err := encode(rpcInfo.preloaderInfo.codec, msg)
+	if err != nil {
+		return err
+	}
+	p.encodedData = data
+	compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+	if err != nil {
+		return err
+	}
+	p.hdr, p.payload = msgHeader(data, compData)
+	return nil
+}
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh
new file mode 100644
index 0000000000..a6f26c8ab0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/regenerate.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+# Copyright 2020 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +set -eu -o pipefail + +WORKDIR=$(mktemp -d) + +function finish { + rm -rf "$WORKDIR" +} +trap finish EXIT + +export GOBIN=${WORKDIR}/bin +export PATH=${GOBIN}:${PATH} +mkdir -p ${GOBIN} + +echo "remove existing generated files" +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') + +echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" +(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) + +echo "go install cmd/protoc-gen-go-grpc" +(cd cmd/protoc-gen-go-grpc && go install .) + +echo "git clone https://github.com/grpc/grpc-proto" +git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto + +echo "git clone https://github.com/protocolbuffers/protobuf" +git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf + +# Pull in code.proto as a proto dependency +mkdir -p ${WORKDIR}/googleapis/google/rpc +echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto" +curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto + +mkdir -p ${WORKDIR}/out + +# Generates sources without the embed requirement +LEGACY_SOURCES=( + ${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto + ${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto + ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto + ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto + profiling/proto/service.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto +) + +# Generates only the new gRPC Service symbols +SOURCES=( + $(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^\(profiling/proto/service.proto\|reflection/grpc_reflection_v1alpha/reflection.proto\)$') + ${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto + ${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto + ${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto + ${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto + ${WORKDIR}/grpc-proto/grpc/testing/*.proto + ${WORKDIR}/grpc-proto/grpc/core/*.proto +) + +# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an +# import path of 'bar' in the generated code when 'foo.proto' is imported in +# one of the sources. +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). 
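+#
+# For example, the entry below that maps grpc/testing/stats.proto to
+# google.golang.org/grpc/interop/grpc_testing makes generated code that
+# imports stats.proto refer to that Go package (editorial example).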
+OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing + +for src in ${SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS}:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + ${src} +done + +for src in ${LEGACY_SOURCES[@]}; do + echo "protoc ${src}" + protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \ + -I"." \ + -I${WORKDIR}/grpc-proto \ + -I${WORKDIR}/googleapis \ + -I${WORKDIR}/protobuf/src \ + ${src} +done + +# The go_package option in grpc/lookup/v1/rls.proto doesn't match the +# current location. Move it into the right place. +mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 +mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 + +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go + +cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go new file mode 100644 index 0000000000..804be887de --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -0,0 +1,138 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package resolver + +type addressMapEntry struct { + addr Address + value any +} + +// AddressMap is a map of addresses to arbitrary values taking into account +// Attributes. BalancerAttributes are ignored, as are Metadata and Type. +// Multiple accesses may not be performed concurrently. Must be created via +// NewAddressMap; do not construct directly. +type AddressMap struct { + // The underlying map is keyed by an Address with fields that we don't care + // about being set to their zero values. The only fields that we care about + // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to + // distinguish between addresses with same `Addr` and `ServerName`, but + // different `Attributes`, we cannot store the `Attributes` in the map key. 
+ // + // The comparison operation for structs work as follows: + // Struct values are comparable if all their fields are comparable. Two + // struct values are equal if their corresponding non-blank fields are equal. + // + // The value type of the map contains a slice of addresses which match the key + // in their `Addr` and `ServerName` fields and contain the corresponding value + // associated with them. + m map[Address]addressMapEntryList +} + +func toMapKey(addr *Address) Address { + return Address{Addr: addr.Addr, ServerName: addr.ServerName} +} + +type addressMapEntryList []*addressMapEntry + +// NewAddressMap creates a new AddressMap. +func NewAddressMap() *AddressMap { + return &AddressMap{m: make(map[Address]addressMapEntryList)} +} + +// find returns the index of addr in the addressMapEntry slice, or -1 if not +// present. +func (l addressMapEntryList) find(addr Address) int { + for i, entry := range l { + // Attributes are the only thing to match on here, since `Addr` and + // `ServerName` are already equal. + if entry.addr.Attributes.Equal(addr.Attributes) { + return i + } + } + return -1 +} + +// Get returns the value for the address in the map, if present. +func (a *AddressMap) Get(addr Address) (value any, ok bool) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + return entryList[entry].value, true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. +func (a *AddressMap) Set(addr Address, value any) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + if entry := entryList.find(addr); entry != -1 { + entryList[entry].value = value + return + } + a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) +} + +// Delete removes addr from the map. +func (a *AddressMap) Delete(addr Address) { + addrKey := toMapKey(&addr) + entryList := a.m[addrKey] + entry := entryList.find(addr) + if entry == -1 { + return + } + if len(entryList) == 1 { + entryList = nil + } else { + copy(entryList[entry:], entryList[entry+1:]) + entryList = entryList[:len(entryList)-1] + } + a.m[addrKey] = entryList +} + +// Len returns the number of entries in the map. +func (a *AddressMap) Len() int { + ret := 0 + for _, entryList := range a.m { + ret += len(entryList) + } + return ret +} + +// Keys returns a slice of all current map keys. +func (a *AddressMap) Keys() []Address { + ret := make([]Address, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.addr) + } + } + return ret +} + +// Values returns a slice of all current map values. +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) + for _, entryList := range a.m { + for _, entry := range entryList { + ret = append(ret, entry.value) + } + } + return ret +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 0000000000..11384e228e --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,316 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. +package resolver + +import ( + "context" + "fmt" + "net" + "net/url" + "strings" + + "google.golang.org/grpc/attributes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/serviceconfig" +) + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme = "passthrough" +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. b.Scheme will +// be used as the scheme registered with this builder. The registry is case +// sensitive, and schemes should not contain any uppercase characters. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Resolvers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[b.Scheme()] = b +} + +// Get returns the resolver builder registered with the given scheme. +// +// If no builder is register with the scheme, nil will be returned. +func Get(scheme string) Builder { + if b, ok := m[scheme]; ok { + return b + } + return nil +} + +// SetDefaultScheme sets the default scheme that will be used. The default +// default scheme is "passthrough". +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. The scheme set last overrides +// previously set values. +func SetDefaultScheme(scheme string) { + defaultScheme = scheme +} + +// GetDefaultScheme gets the default scheme that will be used. +func GetDefaultScheme() string { + return defaultScheme +} + +// Address represents a server the client connects to. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + + // ServerName is the name of this address. + // If non-empty, the ServerName is used as the transport certification authority for + // the address, instead of the hostname from the Dial target string. In most cases, + // this should not be set. + // + // WARNING: ServerName must only be populated with trusted values. It + // is insecure to populate it with data from untrusted inputs since untrusted + // values could be used to bypass the authority checks performed by TLS. + ServerName string + + // Attributes contains arbitrary data about this address intended for + // consumption by the SubConn. + Attributes *attributes.Attributes + + // BalancerAttributes contains arbitrary data about this address intended + // for consumption by the LB policy. These attributes do not affect SubConn + // creation, connection establishment, handshaking, etc. + // + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. 
+ BalancerAttributes *attributes.Attributes + + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + // + // Deprecated: use Attributes instead. + Metadata any +} + +// Equal returns whether a and o are identical. Metadata is compared directly, +// not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. +func (a Address) Equal(o Address) bool { + return a.Addr == o.Addr && a.ServerName == o.ServerName && + a.Attributes.Equal(o.Attributes) && + a.BalancerAttributes.Equal(o.BalancerAttributes) && + a.Metadata == o.Metadata +} + +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() +} + +// BuildOptions includes additional information for the builder to create +// the resolver. +type BuildOptions struct { + // DisableServiceConfig indicates whether a resolver implementation should + // fetch service config data. + DisableServiceConfig bool + // DialCreds is the transport credentials used by the ClientConn for + // communicating with the target gRPC service (set via + // WithTransportCredentials). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + DialCreds credentials.TransportCredentials + // CredsBundle is the credentials bundle used by the ClientConn for + // communicating with the target gRPC service (set via + // WithCredentialsBundle). In cases where a name resolution service + // requires the same credentials, the resolver may use this field. In most + // cases though, it is not appropriate, and this field may be ignored. + CredsBundle credentials.Bundle + // Dialer is the custom dialer used by the ClientConn for dialling the + // target gRPC service (set via WithDialer). In cases where a name + // resolution service requires the same dialer, the resolver may use this + // field. In most cases though, it is not appropriate, and this field may + // be ignored. + Dialer func(context.Context, string) (net.Conn, error) +} + +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + +// State contains the current Resolver state relevant to the ClientConn. +type State struct { + // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. 
The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. + Addresses []Address + + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + + // ServiceConfig contains the result from parsing the latest service + // config. If it is nil, it indicates no service config is present or the + // resolver does not provide service configs. + ServiceConfig *serviceconfig.ParseResult + + // Attributes contains arbitrary data about the resolver intended for + // consumption by the load balancing policy. + Attributes *attributes.Attributes +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // UpdateState updates the state of the ClientConn appropriately. + // + // If an error is returned, the resolver should try to resolve the + // target again. The resolver should use a backoff timer to prevent + // overloading the server with requests. If a resolver is certain that + // reresolving will not change the result, e.g. because it is + // a watch-based resolver, returned errors can be ignored. + // + // If the resolved State is the same as the last reported one, calling + // UpdateState can be omitted. + UpdateState(State) error + // ReportError notifies the ClientConn that the Resolver encountered an + // error. The ClientConn will notify the load balancer and begin calling + // ResolveNow on the Resolver with exponential backoff. + ReportError(error) + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + // + // Deprecated: Use UpdateState instead. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + // + // Deprecated: Use UpdateState instead. + NewServiceConfig(serviceConfig string) + // ParseServiceConfig parses the provided service config and returns an + // object that provides the parsed config. + ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// It is parsed from the target string that gets passed into Dial or DialContext +// by the user. And gRPC passes it to the resolver and the balancer. +// +// If the target follows the naming spec, and the parsed scheme is registered +// with gRPC, we will parse the target string according to the spec. If the +// target does not contain a scheme or if the parsed scheme is not registered +// (i.e. no corresponding resolver available to resolve the endpoint), we will +// apply the default scheme, and will attempt to reparse it. 
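+//
+// For illustration (the values are hypothetical): the dial target
+// "dns://8.8.8.8/example.com:443" parses to URL.Scheme "dns", URL.Host
+// "8.8.8.8" (the authority) and URL.Path "/example.com:443", so Endpoint()
+// below returns "example.com:443". A schemeless target such as
+// "localhost:50051" is reparsed with the default scheme, typically as
+// "passthrough:///localhost:50051".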
+type Target struct { + // URL contains the parsed dial target with an optional default scheme added + // to it if the original dial target contained no scheme or contained an + // unregistered scheme. Any query params specified in the original dial + // target can be accessed from here. + URL url.URL +} + +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. +func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error) + // Scheme returns the scheme supported by this resolver. Scheme is defined + // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned + // string should not contain uppercase characters, as they will not match + // the parsed target's scheme as defined in RFC 3986. + Scheme() string +} + +// ResolveNowOptions includes additional information for ResolveNow. +type ResolveNowOptions struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOptions) + // Close closes the resolver. + Close() +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 0000000000..d683305608 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,247 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// resolverStateUpdater wraps the single method used by ccResolverWrapper to +// report a state update from the actual resolver implementation. +type resolverStateUpdater interface { + updateResolverState(s resolver.State, err error) error +} + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc resolverStateUpdater + channelzID *channelz.Identifier + ignoreServiceConfig bool + opts ccResolverWrapperOpts + serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. + serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). + + // All incoming (resolver --> gRPC) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the serializer. + // Fields accessed *only* in these serializer callbacks, can therefore be + // accessed without a mutex. + curState resolver.State + + // mu guards access to the below fields. + mu sync.Mutex + closed bool + resolver resolver.Resolver // Accessed only from outgoing calls. +} + +// ccResolverWrapperOpts wraps the arguments to be passed when creating a new +// ccResolverWrapper. +type ccResolverWrapperOpts struct { + target resolver.Target // User specified dial target to resolve. + builder resolver.Builder // Resolver builder to use. + bOpts resolver.BuildOptions // Resolver build options to use. + channelzID *channelz.Identifier // Channelz identifier for the channel. +} + +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { + ctx, cancel := context.WithCancel(context.Background()) + ccr := &ccResolverWrapper{ + cc: cc, + channelzID: opts.channelzID, + ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + opts: opts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + + // Cannot hold the lock at build time because the resolver can send an + // update or error inline and these incoming calls grab the lock to schedule + // a callback in the serializer. + r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) + if err != nil { + cancel() + return nil, err + } + + // Any error reported by the resolver at build time that leads to a + // re-resolution request from the balancer is dropped by grpc until we + // return from this function. So, we don't have to handle pending resolveNow + // requests here. + ccr.mu.Lock() + ccr.resolver = r + ccr.mu.Unlock() + + return ccr, nil +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.mu.Lock() + defer ccr.mu.Unlock() + + // ccr.resolver field is set only after the call to Build() returns. But in + // the process of building, the resolver may send an error update which when + // propagated to the balancer may result in a re-resolution request. 
+ if ccr.closed || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) +} + +func (ccr *ccResolverWrapper) close() { + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + return + } + + channelz.Info(logger, ccr.channelzID, "Closing the name resolver") + + // Close the serializer to ensure that no more calls from the resolver are + // handled, before actually closing the resolver. + ccr.serializerCancel() + ccr.closed = true + r := ccr.resolver + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-ccr.serializer.Done() + + // Spawn a goroutine to close the resolver (since it may block trying to + // cleanup all allocated resources) and return early. + go r.Close() +} + +// serializerScheduleLocked is a convenience method to schedule a function to be +// run on the serializer while holding ccr.mu. +func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { + ccr.mu.Lock() + ccr.serializer.Schedule(f) + ccr.mu.Unlock() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. +func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + errCh := make(chan error, 1) + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ok := ccr.serializer.Schedule(func(context.Context) { + ccr.addChannelzTraceEvent(s) + ccr.curState = s + if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { + errCh <- balancer.ErrBadResolverState + return + } + errCh <- nil + }) + if !ok { + // The only time when Schedule() fail to add the callback to the + // serializer is when the serializer is closed, and this happens only + // when the resolver wrapper is closed. + return nil + } + return <-errCh +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverState(resolver.State{}, err) + }) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.serializerScheduleLocked(func(_ context.Context) { + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) + ccr.curState.Addresses = addrs + ccr.cc.updateResolverState(ccr.curState, nil) + }) +} + +// NewServiceConfig is called by the resolver implementation to send service +// configs to gRPC. 
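+//
+// For illustration, a resolver might deliver a service config such as
+//
+//	{"loadBalancingConfig": [{"round_robin": {}}]}
+//
+// which, if it parses successfully, replaces the current state's
+// ServiceConfig and is pushed to the ClientConn.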
+func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) + }) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. +func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 0000000000..b7723aa09c --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,959 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/binary" + "fmt" + "io" + "math" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. 
+ Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return nil, fmt.Errorf("grpc: invalid compression level: %d", level) + } + return &gzipCompressor{ + pool: sync.Pool{ + New: func() any { + w, err := gzip.NewWriterLevel(io.Discard, level) + if err != nil { + panic(err) + } + return w + }, + }, + }, nil +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return io.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + compressorType string + failFast bool + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec + maxRetryRPCBufferSize int + onFinish []func(err error) +} + +func defaultCallInfo() *callInfo { + return &callInfo{ + failFast: true, + maxRetryRPCBufferSize: 256 * 1024, // 256KB + } +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo, *csAttempt) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. 
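+//
+// A sketch of that embedding pattern (traceIDOption and the interceptor are
+// hypothetical; assumes the metadata package is imported):
+//
+//	type traceIDOption struct {
+//		grpc.EmptyCallOption // satisfies CallOption without altering the call
+//		id string
+//	}
+//
+//	func unaryInt(ctx context.Context, method string, req, reply any,
+//		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+//		for _, o := range opts {
+//			if t, ok := o.(traceIDOption); ok {
+//				ctx = metadata.AppendToOutgoingContext(ctx, "trace-id", t.id)
+//			}
+//		}
+//		return invoker(ctx, method, req, reply, cc, opts...)
+//	}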
+type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo, *csAttempt) {} + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) { + *o.HeaderAddr, _ = attempt.s.Header() +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) { + *o.TrailerAddr = attempt.s.Trailer() +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) { + if x, ok := peer.FromContext(attempt.s.Context()); ok { + *o.PeerAddr = *x + } +} + +// WaitForReady configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If waitForReady is false and the +// connection is in the TRANSIENT_FAILURE state, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// +// By default, RPCs don't "wait for ready". +func WaitForReady(waitForReady bool) CallOption { + return FailFastCallOption{FailFast: !waitForReady} +} + +// FailFast is the opposite of WaitForReady. +// +// Deprecated: use WaitForReady. +func FailFast(failFast bool) CallOption { + return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
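+//
+// For illustration (client and method are hypothetical):
+//
+//	resp, err := client.GetFeature(ctx, req, grpc.WaitForReady(true))
+//
+// which is equivalent to passing FailFastCallOption{FailFast: false} and
+// makes the RPC block until a connection is available instead of failing
+// immediately on TRANSIENT_FAILURE.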
+type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} + +// OnFinish returns a CallOption that configures a callback to be called when +// the call completes. The error passed to the callback is the status of the +// RPC, and may be nil. The onFinish callback provided will only be called once +// by gRPC. This is mainly used to be used by streaming interceptors, to be +// notified when the RPC completes along with information about the status of +// the RPC. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func OnFinish(onFinish func(err error)) CallOption { + return OnFinishCallOption{ + OnFinish: onFinish, + } +} + +// OnFinishCallOption is CallOption that indicates a callback to be called when +// the call completes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type OnFinishCallOption struct { + OnFinish func(error) +} + +func (o OnFinishCallOption) before(c *callInfo) error { + c.onFinish = append(c.onFinish, o.OnFinish) + return nil +} + +func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. +func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can receive. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. +func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size in bytes the client can send. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. 
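+//
+// For illustration (client, Fetch and myCreds are hypothetical; myCreds
+// implements credentials.PerRPCCredentials):
+//
+//	resp, err := client.Fetch(ctx, req,
+//		grpc.MaxCallRecvMsgSize(16*1024*1024),
+//		grpc.PerRPCCredentials(myCreds),
+//	)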
+type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If ForceCodec is not also used, the content-subtype will be used to look up +// the Codec to use in the registry controlled by RegisterCodec. See the +// documentation on RegisterCodec for details on registration. The lookup of +// content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If ForceCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// ForceCodec returns a CallOption that will set codec to be used for all +// request and response messages for a call. The result of calling Name() will +// be used as the content-subtype after converting to lowercase, unless +// CallContentSubtype is also used. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. +// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
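+//
+// A sketch of a codec selected by content-subtype (jsonCodec is
+// hypothetical; real JSON handling of proto messages would typically use
+// protojson rather than encoding/json):
+//
+//	type jsonCodec struct{}
+//
+//	func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
+//	func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
+//	func (jsonCodec) Name() string                       { return "json" }
+//
+//	func init() { encoding.RegisterCodec(jsonCodec{}) }
+//
+//	// Per call, the wire Content-Type becomes "application/grpc+json":
+//	resp, err := client.Fetch(ctx, req, grpc.CallContentSubtype("json"))
+//	// or, bypassing the registry entirely:
+//	resp, err = client.Fetch(ctx, req, grpc.ForceCodec(jsonCodec{}))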
+func ForceCodec(codec encoding.Codec) CallOption { + return ForceCodecCallOption{Codec: codec} +} + +// ForceCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ForceCodecCallOption struct { + Codec encoding.Codec +} + +func (o ForceCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of +// an encoding.Codec. +// +// Deprecated: use ForceCodec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} + +// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory +// used for buffering this RPC's requests for retry purposes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func MaxRetryRPCBufferSize(bytes int) CallOption { + return MaxRetryRPCBufferSizeCallOption{bytes} +} + +// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of +// memory to be used for caching this RPC for retry purposes. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type MaxRetryRPCBufferSizeCallOption struct { + MaxRetryRPCBufferSize int +} + +func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error { + c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize + return nil +} +func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = 0 // no compression + compressionMade payloadFormat = 1 // compressed +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. 
+func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + msg = p.recvBufferPool.Get(int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer containing the message, or an +// error if it is too large to be transmitted by grpc. If msg is nil, it +// generates an empty message. +func encode(c baseCodec, msg any) ([]byte, error) { + if msg == nil { // NOTE: typed nils will not be caught by this check + return nil, nil + } + b, err := c.Marshal(msg) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if uint(len(b)) > math.MaxUint32 { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + return b, nil +} + +// compress returns the input bytes compressed by compressor or cp. If both +// compressors are nil, returns nil. +// +// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. +func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { + if compressor == nil && cp == nil { + return nil, nil + } + wrapErr := func(err error) error { + return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + cbuf := &bytes.Buffer{} + if compressor != nil { + z, err := compressor.Compress(cbuf) + if err != nil { + return nil, wrapErr(err) + } + if _, err := z.Write(in); err != nil { + return nil, wrapErr(err) + } + if err := z.Close(); err != nil { + return nil, wrapErr(err) + } + } else { + if err := cp.Do(cbuf, in); err != nil { + return nil, wrapErr(err) + } + } + return cbuf.Bytes(), nil +} + +const ( + payloadLen = 1 + sizeLen = 4 + headerLen = payloadLen + sizeLen +) + +// msgHeader returns a 5-byte header for the message being transmitted and the +// payload, which is compData if non-nil or data otherwise. 
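+//
+// For illustration, a 1024-byte uncompressed message is framed as
+//
+//	hdr[0]   = 0x00                 (compressionNone)
+//	hdr[1:5] = 0x00 0x00 0x04 0x00  (big-endian length 1024)
+//
+// followed by the 1024 payload bytes; with compression, hdr[0] is 0x01 and
+// the length counts the compressed bytes.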
+func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { + hdr = make([]byte, headerLen) + if compData != nil { + hdr[0] = byte(compressionMade) + data = compData + } else { + hdr[0] = byte(compressionNone) + } + + // Write length of payload into buf + binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data))) + return hdr, data +} + +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { + return &stats.OutPayload{ + Client: client, + Payload: msg, + Data: data, + Length: len(data), + WireLength: len(payload) + headerLen, + CompressedLength: len(payload), + SentTime: t, + } +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +type payloadInfo struct { + compressedLength int // The compressed length got from wire. + uncompressedBytes []byte +} + +func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { + pf, buf, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return nil, err + } + if payInfo != nil { + payInfo.compressedLength = len(buf) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return nil, st.Err() + } + + var size int + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) + } else { + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) + } + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + } + if size > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + } + } + return buf, nil +} + +// Using compressor, decompress d, returning data and size. +// Optionally, if data will be over maxReceiveMessageSize, just return the size. +func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return nil, 0, err + } + if sizer, ok := compressor.(interface { + DecompressedSize(compressedBytes []byte) int + }); ok { + if size := sizer.DecompressedSize(d); size >= 0 { + if size > maxReceiveMessageSize { + return nil, size, nil + } + // size is used as an estimate to size the buffer, but we + // will read more data if available. + // +MinRead so ReadFrom will not reallocate if size is correct. 
+ buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead)) + bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return buf.Bytes(), int(bytesRead), err + } + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + return d, len(d), err +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + if err != nil { + return err + } + if err := c.Unmarshal(buf, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) + } + if payInfo != nil { + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) + } + return nil +} + +// Information about RPC +type rpcInfo struct { + failfast bool + preloaderInfo *compressorInfo +} + +// Information about Preloader +// Responsible for storing codec, and compressors +// If stream (s) has context s.Context which stores rpcInfo that has non nil +// pointers to codec, and compressors, then we can use preparedMsg for Async message prep +// and reuse marshalled bytes +type compressorInfo struct { + codec baseCodec + cp Compressor + comp encoding.Compressor +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{ + failfast: failfast, + preloaderInfo: &compressorInfo{ + codec: codec, + cp: cp, + comp: comp, + }, + }) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.Code instead. +func Code(err error) codes.Code { + return status.Code(err) +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.Convert and Message method instead. +func ErrorDesc(err error) string { + return status.Convert(err).Message() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...any) error { + return status.Errorf(c, format, a...) +} + +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + +// toRPCErr converts an error into an error from the status package. 
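+//
+// For illustration, the conversions performed below:
+//
+//	toRPCErr(context.DeadlineExceeded) // status with codes.DeadlineExceeded
+//	toRPCErr(context.Canceled)         // status with codes.Canceled
+//	toRPCErr(io.ErrUnexpectedEOF)      // status with codes.Internal
+//	toRPCErr(errors.New("boom"))       // status with codes.Unknown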
+func toRPCErr(err error) error { + switch err { + case nil, io.EOF: + return err + case context.DeadlineExceeded: + return errContextDeadline + case context.Canceled: + return errContextCanceled + case io.ErrUnexpectedEOF: + return status.Error(codes.Internal, err.Error()) + } + + switch e := err.(type) { + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + case *transport.NewStreamError: + return toRPCErr(e.Err) + } + + if _, ok := status.FromError(err); ok { + return err + } + + return status.Error(codes.Unknown, err.Error()) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it, but set the content + // subtype if it is not set. + if c.contentSubtype == "" { + // c.codec is a baseCodec to hide the difference between grpc.Codec and + // encoding.Codec (Name vs. String method name). We only support + // setting content subtype from encoding.Codec to avoid a behavior + // change with the deprecated version. + if ec, ok := c.codec.(encoding.Codec); ok { + c.contentSubtype = strings.ToLower(ec.Name()) + } + } + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. + c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// channelzData is used to store channelz related data for ClientConn, addrConn and Server. +// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic +// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. +// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. +type channelzData struct { + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of + // time.Time since it's more costly to atomically update time.Time variable than int64 variable. + lastCallStartedTime int64 +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 7. +// +// Older versions are kept for compatibility. +// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true + SupportPackageIsVersion7 = true +) + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 0000000000..eeae92fbe0 --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,2122 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/trace" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 + + // Server transports are tracked in a map which is keyed on listener + // address. For regular gRPC traffic, connections are accepted in Serve() + // through a call to Accept(), and we use the actual listener address as key + // when we add it to the map. But for connections received through + // ServeHTTP(), we do not have a listener and hence use this dummy value. + listenerAddressForServeHTTP = "listenerAddressForServeHTTP" +) + +func init() { + internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { + return srv.opts.creds + } + internal.DrainServerTransports = func(srv *Server, addr string) { + srv.drainServerTransports(addr) + } + internal.AddGlobalServerOptions = func(opt ...ServerOption) { + globalServerOptions = append(globalServerOptions, opt...) + } + internal.ClearGlobalServerOptions = func() { + globalServerOptions = nil + } + internal.BinaryLogger = binaryLogger + internal.JoinServerOptions = newJoinServerOption +} + +var statusOK = status.New(codes.OK, "") +var logger = grpclog.Component("core") + +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType any + Methods []MethodDesc + Streams []StreamDesc + Metadata any +} + +// serviceInfo wraps information about a service. It is very similar to +// ServiceDesc and is constructed from it for internal purposes. +type serviceInfo struct { + // Contains the implementation for the methods in this service. + serviceImpl any + methods map[string]*MethodDesc + streams map[string]*StreamDesc + mdata any +} + +// Server is a gRPC server to serve RPC requests. 
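+//
+// A minimal usage sketch (illustrative only; pb and greeter are hypothetical
+// names, not part of this package):
+//
+//	s := grpc.NewServer()
+//	pb.RegisterGreeterServer(s, &greeter{})
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := s.Serve(lis); err != nil {
+//		log.Fatal(err)
+//	}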
+type Server struct { + opts serverOptions + + mu sync.Mutex // guards following + lis map[net.Listener]bool + // conns contains all active server transports. It is a map keyed on a + // listener address with the value being the set of active transports + // belonging to that listener. + conns map[string]map[transport.ServerTransport]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + services map[string]*serviceInfo // service name -> service info + events trace.EventLog + + quit *grpcsync.Event + done *grpcsync.Event + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID *channelz.Identifier + czData *channelzData + + serverWorkerChannel chan func() +} + +type serverOptions struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor + binaryLogger binarylog.Logger + inTapHandle tap.ServerInHandle + statsHandlers []stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + sharedWriteBuffer bool + connectionTimeout time.Duration + maxHeaderListSize *uint32 + headerTableSize *uint32 + numServerWorkers uint32 + recvBufferPool SharedBufferPool +} + +var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, + writeBufferSize: defaultWriteBufSize, + readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, +} +var globalServerOptions []ServerOption + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption interface { + apply(*serverOptions) +} + +// EmptyServerOption does not alter the server configuration. It can be embedded +// in another structure to build custom server options. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type EmptyServerOption struct{} + +func (EmptyServerOption) apply(*serverOptions) {} + +// funcServerOption wraps a function that modifies serverOptions into an +// implementation of the ServerOption interface. +type funcServerOption struct { + f func(*serverOptions) +} + +func (fdo *funcServerOption) apply(do *serverOptions) { + fdo.f(do) +} + +func newFuncServerOption(f func(*serverOptions)) *funcServerOption { + return &funcServerOption{ + f: f, + } +} + +// joinServerOption provides a way to combine arbitrary number of server +// options into one. +type joinServerOption struct { + opts []ServerOption +} + +func (mdo *joinServerOption) apply(do *serverOptions) { + for _, opt := range mdo.opts { + opt.apply(do) + } +} + +func newJoinServerOption(opts ...ServerOption) ServerOption { + return &joinServerOption{opts: opts} +} + +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. 
+// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. +// Note: A Send call may not directly translate to a write. +func WriteBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.writeBufferSize = s + }) +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. +func ReadBufferSize(s int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.readBufferSize = s + }) +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialWindowSize = s + }) +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.initialConnWindowSize = s + }) +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { + logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") + kp.Time = internal.KeepaliveMinServerPingTime + } + + return newFuncServerOption(func(o *serverOptions) { + o.keepaliveParams = kp + }) +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.keepalivePolicy = kep + }) +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. +// +// Deprecated: register codecs using encoding.RegisterCodec. The server will +// automatically use registered codecs based on the incoming requests' headers. +// See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +func CustomCodec(codec Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// ForceServerCodec returns a ServerOption that sets a codec for message +// marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered +// with RegisterCodec. 
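+//
+// For comparison, the registration-based path looks roughly like this
+// (illustrative sketch; jsonCodec stands in for some encoding.Codec
+// implementation):
+//
+//	func init() {
+//		encoding.RegisterCodec(jsonCodec{})
+//	}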
+// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between encoding.Codec +// and content-subtype. +// +// This function is provided for advanced users; prefer to register codecs +// using encoding.RegisterCodec. +// The server will automatically use registered codecs based on the incoming +// requests' headers. See also +// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec. +// Will be supported throughout 1.x. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ForceServerCodec(codec encoding.Codec) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.codec = codec + }) +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCCompressor(cp Compressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.cp = cp + }) +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. Will be supported +// throughout 1.x. +func RPCDecompressor(dc Decompressor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.dc = dc + }) +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxReceiveMessageSize = m + }) +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default `math.MaxInt32`. +func MaxSendMsgSize(m int) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxSendMessageSize = m + }) +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } + return newFuncServerOption(func(o *serverOptions) { + o.maxConcurrentStreams = n + }) +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.creds = c + }) +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. 
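+//
+// A two-interceptor chain can be built by hand, roughly as follows
+// (illustrative sketch; see also ChainUnaryInterceptor). The outer
+// interceptor runs first and invokes the inner one through the handler it
+// is given:
+//
+//	func chain(outer, inner UnaryServerInterceptor) UnaryServerInterceptor {
+//		return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) {
+//			return outer(ctx, req, info, func(ctx context.Context, req any) (any, error) {
+//				return inner(ctx, req, info, handler)
+//			})
+//		}
+//	}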
+func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + }) +} + +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + }) +} + +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for streaming RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. +func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + }) +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. +func StatsHandler(h stats.Handler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + if h == nil { + logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption") + // Do not allow a nil stats handler, which would otherwise cause + // panics. + return + } + o.statsHandlers = append(o.statsHandlers, h) + }) +} + +// binaryLogger returns a ServerOption that can set the binary logger for the +// server. +func binaryLogger(bl binarylog.Logger) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.binaryLogger = bl + }) +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function and stream interceptor (if set) have full access to +// the ServerStream, including its Context. 
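+//
+// For example (illustrative sketch of a fallback that rejects unrouted
+// methods):
+//
+//	grpc.NewServer(grpc.UnknownServiceHandler(func(srv any, stream grpc.ServerStream) error {
+//		method, _ := grpc.MethodFromServerStream(stream)
+//		return status.Errorf(codes.Unimplemented, "no route configured for %q", method)
+//	}))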
+func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + }) +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ConnectionTimeout(d time.Duration) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.connectionTimeout = d + }) +} + +// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size +// of header list that the server is prepared to accept. +func MaxHeaderListSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.maxHeaderListSize = &s + }) +} + +// HeaderTableSize returns a ServerOption that sets the size of dynamic +// header table for stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func HeaderTableSize(s uint32) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.headerTableSize = &s + }) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker +// goroutines that should be used to process incoming streams. Setting this to +// zero (default) will disable workers and spawn a new goroutine for each +// stream. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NumStreamWorkers(numServerWorkers uint32) ServerOption { + // TODO: If/when this API gets stabilized (i.e. stream workers become the + // only way streams are processed), change the behavior of the zero value to + // a sane default. Preliminary experiments suggest that a value equal to the + // number of CPUs available is most performant; requires thorough testing. + return newFuncServerOption(func(o *serverOptions) { + o.numServerWorkers = numServerWorkers + }) +} + +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + +// serverWorkerResetThreshold defines how often the stack must be reset. Every +// N requests, by spawning a new goroutine in its place, a worker can reset its +// stack so that large stacks don't live in memory forever. 
2^16 should allow +// each goroutine stack to live for at least a few seconds in a typical +// workload (assuming a QPS of a few thousand requests/sec). +const serverWorkerResetThreshold = 1 << 16 + +// serverWorkers blocks on a *transport.Stream channel forever and waits for +// data to be fed by serveStreams. This allows multiple requests to be +// processed by the same goroutine, removing the need for expensive stack +// re-allocations (see the runtime.morestack problem [1]). +// +// [1] https://github.com/golang/go/issues/18138 +func (s *Server) serverWorker() { + for completed := 0; completed < serverWorkerResetThreshold; completed++ { + f, ok := <-s.serverWorkerChannel + if !ok { + return + } + f() + } + go s.serverWorker() +} + +// initServerWorkers creates worker goroutines and a channel to process incoming +// connections to reduce the time spent overall on runtime.morestack. +func (s *Server) initServerWorkers() { + s.serverWorkerChannel = make(chan func()) + for i := uint32(0); i < s.opts.numServerWorkers; i++ { + go s.serverWorker() + } +} + +func (s *Server) stopServerWorkers() { + close(s.serverWorkerChannel) +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range globalServerOptions { + o.apply(&opts) + } + for _, o := range opt { + o.apply(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[string]map[transport.ServerTransport]bool), + services: make(map[string]*serviceInfo), + quit: grpcsync.NewEvent(), + done: grpcsync.NewEvent(), + czData: new(channelzData), + } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if s.opts.numServerWorkers > 0 { + s.initServerWorkers() + } + + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...any) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...any) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// ServiceRegistrar wraps a single method that supports service registration. It +// enables users to pass concrete types other than grpc.Server to the service +// registration methods exported by the IDL generated code. +type ServiceRegistrar interface { + // RegisterService registers a service and its implementation to the + // concrete type implementing this interface. It may not be called + // once the server has started serving. + // desc describes the service and its methods and handlers. impl is the + // service implementation which is passed to the method handlers. + RegisterService(desc *ServiceDesc, impl any) +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. If ss is non-nil (for legacy code), its type is checked to +// ensure it implements sd.HandlerType. 
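+//
+// Generated code typically wraps this call; a sketch of that shape
+// (illustrative, with hypothetical names):
+//
+//	func RegisterGreeterServer(s grpc.ServiceRegistrar, srv GreeterServer) {
+//		s.RegisterService(&Greeter_ServiceDesc, srv)
+//	}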
+func (s *Server) RegisterService(sd *ServiceDesc, ss any) { + if ss != nil { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss any) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.services[sd.ServiceName]; ok { + logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + info := &serviceInfo{ + serviceImpl: ss, + methods: make(map[string]*MethodDesc), + streams: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + info.methods[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + info.streams[d.StreamName] = d + } + s.services[sd.ServiceName] = info +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata any +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.services { + methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams)) + for m := range srv.methods { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.streams { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +type listenSocket struct { + net.Listener + channelzID *channelz.Identifier +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + SocketOptions: channelz.GetSocketOption(l.Listener), + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. 
+// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + if s.quit.HasFired() { + // Stop or GracefulStop called; block until done and return nil. + <-s.done.Done() + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + + var tempDelay time.Duration // how long to sleep on accept failure + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit.Done(): + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + if s.quit.HasFired() { + return nil + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(lis.Addr().String(), rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { + if s.quit.HasFired() { + rawConn.Close() + return + } + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(rawConn) + rawConn.SetDeadline(time.Time{}) + if st == nil { + return + } + + if !s.addConn(lisAddr, st) { + return + } + go func() { + s.serveStreams(st) + s.removeConn(lisAddr, st) + }() +} + +func (s *Server) drainServerTransports(addr string) { + s.mu.Lock() + conns := s.conns[addr] + for st := range conns { + st.Drain("") + } + s.mu.Unlock() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). 
+func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + ConnectionTimeout: s.opts.connectionTimeout, + Credentials: s.opts.creds, + InTapHandle: s.opts.inTapHandle, + StatsHandlers: s.opts.statsHandlers, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, + ChannelzParentID: s.channelzID, + MaxHeaderListSize: s.opts.maxHeaderListSize, + HeaderTableSize: s.opts.headerTableSize, + } + st, err := transport.NewServerTransport(c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + // ErrConnDispatched means that the connection was dispatched away from + // gRPC; those connections should be left open. + if err != credentials.ErrConnDispatched { + // Don't log on ErrConnDispatched and io.EOF to prevent log spam. + if err != io.EOF { + channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + } + c.Close() + } + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close(errors.New("finished serving streams for the server transport")) + var wg sync.WaitGroup + + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + } + + if s.opts.numServerWorkers > 0 { + select { + case s.serverWorkerChannel <- f: + return + default: + // If all stream workers are busy, fallback to the default code path. + } + } + go f() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. 
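+//
+// Without TLS, the standard library server speaks HTTP/1.1 by default; a
+// cleartext HTTP/2 setup would need golang.org/x/net/http2/h2c, roughly as
+// follows (illustrative sketch; mux is the root http.Handler from the
+// snippet above):
+//
+//	h2s := &http2.Server{}
+//	srv := &http.Server{Addr: ":8080", Handler: h2c.NewHandler(mux, h2s)}
+//	log.Fatal(srv.ListenAndServe())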
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) + if err != nil { + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. + return + } + if !s.addConn(listenerAddressForServeHTTP, st) { + return + } + defer s.removeConn(listenerAddressForServeHTTP, st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + if !EnableTracing { + return nil + } + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: st.RemoteAddr(), + }, + } + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = time.Until(dl) + } + return trInfo +} + +func (s *Server) addConn(addr string, st transport.ServerTransport) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + st.Close(errors.New("Server.addConn called when server has already been stopped")) + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + st.Drain("") + } + + if s.conns[addr] == nil { + // Create a map entry if this is the first connection on this listener. + s.conns[addr] = make(map[transport.ServerTransport]bool) + } + s.conns[addr][st] = true + return true +} + +func (s *Server) removeConn(addr string, st transport.ServerTransport) { + s.mu.Lock() + defer s.mu.Unlock() + + conns := s.conns[addr] + if conns != nil { + delete(conns, st) + if len(conns) == 0 { + // If the last connection for this address is being removed, also + // remove the map entry corresponding to the address. This is used + // in GracefulStop() when waiting for all connections to be closed. + delete(s.conns, addr) + } + s.cv.Broadcast() + } +} + +func (s *Server) channelzMetric() *channelz.ServerInternalMetric { + return &channelz.ServerInternalMetric{ + CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), + CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), + CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), + LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), + } +} + +func (s *Server) incrCallsStarted() { + atomic.AddInt64(&s.czData.callsStarted, 1) + atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) +} + +func (s *Server) incrCallsSucceeded() { + atomic.AddInt64(&s.czData.callsSucceeded, 1) +} + +func (s *Server) incrCallsFailed() { + atomic.AddInt64(&s.czData.callsFailed, 1) +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + data, err := encode(s.getCodec(stream.ContentSubtype()), msg) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) + return err + } + compData, err := compress(data, cp, comp) + if err != nil { + channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) + return err + } + hdr, payload := msgHeader(data, compData) + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(payload), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, payload, opts) + if err == nil { + for _, sh := range s.opts.statsHandlers { + sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + } + } + return err +} + +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) + } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainUnaryInterceptors(interceptors) + } + + s.opts.unaryInt = chainedInt +} + +func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req any) (any, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { + shs := s.opts.statsHandlers + if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + if channelz.IsOn() { + s.incrCallsStarted() + } + var statsBegin *stats.Begin + for _, sh := range shs { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: false, + IsServerStream: false, + } + sh.HandleRPC(stream.Context(), statsBegin) + } + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + // The deferred error handling for tracing, stats handler and channelz are + // combined into one function to reduce stack usage -- a defer takes ~56-64 + // bytes on the stack, so overflowing the stack will require a stack + // re-allocation, which is expensive. + // + // To maintain behavior similar to separate deferred statements, statements + // should be executed in the reverse order. That is, tracing first, stats + // handler second, and channelz last. Note that panics *within* defers will + // lead to different behavior, but that's an acceptable compromise; that + // would be undefined behavior territory anyway. 
+ defer func() { + if trInfo != nil { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + trInfo.tr.SetError() + } + trInfo.tr.Finish() + } + + for _, sh := range shs { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + var binlogs []binarylog.MethodLogger + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + binlogs = append(binlogs, ml) + } + } + if len(binlogs) != 0 { + ctx := stream.Context() + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ctx); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range binlogs { + binlog.Log(ctx, logEntry) + } + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + var sendCompressorName string + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + sendCompressorName = cp.Type() + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. 
+ comp = encoding.GetCompressor(rc) + if comp != nil { + sendCompressorName = comp.Name() + } + } + + if sendCompressorName != "" { + if err := stream.SetSendCompress(sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + + var payInfo *payloadInfo + if len(shs) != 0 || len(binlogs) != 0 { + payInfo = &payloadInfo{} + } + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + if err != nil { + if e := t.WriteStatus(stream, status.Convert(err)); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + df := func(v any) error { + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + for _, sh := range shs { + sh.HandleRPC(stream.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: v, + Length: len(d), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + Data: d, + }) + } + if len(binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: d, + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), cm) + } + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + if len(binlogs) != 0 { + if h, _ := stream.Header(); h.Len() > 0 { + // Only log serverHeader if there was header. Otherwise it can + // be trailer only. + sh := &binarylog.ServerHeader{ + Header: h, + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + } + } + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), st) + } + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{Last: true} + + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if stream.SendCompress() != sendCompressorName { + comp = encoding.GetCompressor(stream.SendCompress()) + } + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
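+			// Any other error type returned by sendResponse is unexpected;
+			// it is surfaced via the panic below rather than silently
+			// dropped, since it would indicate a bug in the transport layer.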
+ default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + if len(binlogs) != 0 { + h, _ := stream.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), st) + } + } + return err + } + if len(binlogs) != 0 { + h, _ := stream.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + sm := &binarylog.ServerMessage{ + Message: reply, + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), sh) + binlog.Log(stream.Context(), sm) + } + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + if len(binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: stream.Trailer(), + Err: appErr, + } + for _, binlog := range binlogs { + binlog.Log(stream.Context(), st) + } + } + return t.WriteStatus(stream, statusOK) +} + +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) + } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = chainStreamInterceptors(interceptors) + } + + s.opts.streamInt = chainedInt +} + +func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv any, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + } + shs := s.opts.statsHandlers + var statsBegin *stats.Begin + if len(shs) != 0 { + beginTime := time.Now() + statsBegin = &stats.Begin{ + BeginTime: beginTime, + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + for _, sh := range shs { + sh.HandleRPC(stream.Context(), statsBegin) + } + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: shs, + } + + 
if len(shs) != 0 || trInfo != nil || channelz.IsOn() { + // See comment in processUnaryRPC on defers. + defer func() { + if trInfo != nil { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + } + + if len(shs) != 0 { + end := &stats.End{ + BeginTime: statsBegin.BeginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + for _, sh := range shs { + sh.HandleRPC(stream.Context(), end) + } + } + + if channelz.IsOn() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + } + }() + } + + if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + if s.opts.binaryLogger != nil { + if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil { + ss.binlogs = append(ss.binlogs, ml) + } + } + if len(ss.binlogs) != 0 { + md, _ := metadata.FromIncomingContext(ctx) + logEntry := &binarylog.ClientHeader{ + Header: md, + MethodName: stream.Method(), + PeerAddr: nil, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + if a := md[":authority"]; len(a) > 0 { + logEntry.Authority = a[0] + } + if peer, ok := peer.FromContext(ss.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), logEntry) + } + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + ss.sendCompressorName = s.opts.cp.Type() + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. 
+ ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + ss.sendCompressorName = rc + } + } + + if ss.sendCompressorName != "" { + if err := stream.SetSendCompress(ss.sendCompressorName); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err) + } + } + + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + } + var appErr error + var server any + if info != nil { + server = info.serviceImpl + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + } + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), st) + } + } + t.WriteStatus(ss.s, appStatus) + // TODO: Should we log an error from WriteStatus here and below? + return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + if len(ss.binlogs) != 0 { + st := &binarylog.ServerTrailer{ + Trailer: ss.s.Trailer(), + Err: appErr, + } + for _, binlog := range ss.binlogs { + binlog.Log(stream.Context(), st) + } + } + return t.WriteStatus(ss.s, statusOK) +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + trInfo.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + + srv, knownService := s.services[service] + if knownService { + if md, ok := srv.methods[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.streams[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + } + // Unknown service, or known server unknown method. 
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + var errDesc string + if !knownService { + errDesc = fmt.Sprintf("unknown service %v", service) + } else { + errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) + } + if trInfo != nil { + trInfo.tr.LazyPrintf("%s", errDesc) + trInfo.tr.SetError() + } + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + trInfo.tr.SetError() + } + channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// # Experimental +// +// Notice: This type is EXPERIMENTAL and may be changed or removed in a +// later release. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quit.Fire() + + defer func() { + s.serveWG.Wait() + s.done.Fire() + }() + + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + conns := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for _, cs := range conns { + for st := range cs { + st.Close(errors.New("Server.Stop called")) + } + } + if s.opts.numServerWorkers > 0 { + s.stopServerWorkers() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. 
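+//
+// Because GracefulStop blocks indefinitely while RPCs are pending, callers
+// often bound it with a timer and fall back to Stop (illustrative sketch):
+//
+//	done := make(chan struct{})
+//	go func() {
+//		s.GracefulStop()
+//		close(done)
+//	}()
+//	select {
+//	case <-done:
+//	case <-time.After(10 * time.Second):
+//		s.Stop()
+//	}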
+func (s *Server) GracefulStop() { + s.quit.Fire() + defer s.done.Fire() + + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for _, conns := range s.conns { + for st := range conns { + st.Drain("graceful_stop") + } + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata to be sent from the server to the client. +// The context provided must be the context passed to the server's handler. +// +// Streaming RPCs should prefer the SetHeader method of the ServerStream. +// +// When called multiple times, all the provided metadata will be merged. All +// the metadata will be sent out when one of the following happens: +// +// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader. +// - The first response message is sent. For unary handlers, this occurs when +// the handler returns; for streaming handlers, this can happen when stream's +// SendMsg method is called. +// - An RPC status is sent out (error or success). This occurs when the handler +// returns. +// +// SetHeader will fail if called after any of the events above. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once, and may not +// be called after any event that causes headers to be sent (see SetHeader for +// a complete list). The provided md and headers set by SetHeader() will be +// sent. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetSendCompressor sets a compressor for outbound messages from the server. +// It must not be called after any event that causes headers to be sent +// (see ServerStream.SetHeader for the complete list). 
Provided compressor is +// used when below conditions are met: +// +// - compressor is registered via encoding.RegisterCompressor +// - compressor name must exist in the client advertised compressor names +// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to +// get client supported compressor names. +// +// The context provided must be the context passed to the server's handler. +// It must be noted that compressor name encoding.Identity disables the +// outbound compression. +// By default, server messages will be sent using the same compressor with +// which request messages were sent. +// +// It is not safe to call SetSendCompressor concurrently with SendHeader and +// SendMsg. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func SetSendCompressor(ctx context.Context, name string) error { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return fmt.Errorf("failed to fetch the stream from the given context") + } + + if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil { + return fmt.Errorf("unable to set send compressor: %w", err) + } + + return stream.SetSendCompress(name) +} + +// ClientSupportedCompressors returns compressor names advertised by the client +// via grpc-accept-encoding header. +// +// The context provided must be the context passed to the server's handler. +// +// # Experimental +// +// Notice: This function is EXPERIMENTAL and may be changed or removed in a +// later release. +func ClientSupportedCompressors(ctx context.Context) ([]string, error) { + stream, ok := ServerTransportStreamFromContext(ctx).(*transport.Stream) + if !ok || stream == nil { + return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx) + } + + return strings.Split(stream.ClientAdvertisedCompressors(), ","), nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +// +// The error returned is compatible with the status package. However, the +// status code will often not match the RPC status as seen by the client +// application, and therefore, should not be relied upon for this purpose. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} + +type channelzServer struct { + s *Server +} + +func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { + return c.s.channelzMetric() +} + +// validateSendCompressor returns an error when given compressor name cannot be +// handled by the server or the client based on the advertised compressors. 
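+//
+// For example (editor's sketch; assumes the gzip compressor has been
+// registered via encoding.RegisterCompressor):
+//
+//	validateSendCompressor("identity", "")          // nil: identity is always allowed
+//	validateSendCompressor("gzip", "gzip,identity") // nil: registered and advertised
+//	validateSendCompressor("gzip", "identity")      // error: client does not support it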
+func validateSendCompressor(name, clientCompressors string) error { + if name == encoding.Identity { + return nil + } + + if !grpcutil.IsCompressorNameRegistered(name) { + return fmt.Errorf("compressor not registered %q", name) + } + + for _, c := range strings.Split(clientCompressors, ",") { + if c == name { + return nil // found match + } + } + return fmt.Errorf("client does not support compressor %q", name) +} + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n atomic.Int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 0000000000..0df11fc098 --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,347 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal" + internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" + "google.golang.org/grpc/serviceconfig" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig = internalserviceconfig.MethodConfig + +type lbConfig struct { + name string + cfg serviceconfig.LoadBalancingConfig +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + serviceconfig.Config + + // LB is the load balancer the service providers recommends. This is + // deprecated; lbConfigs is preferred. 
If lbConfig and LB are both present, + // lbConfig will be used. + LB *string + + // lbConfig is the service config's load balancing configuration. If + // lbConfig and LB are both present, lbConfig will be used. + lbConfig *lbConfig + + // Methods contains a map for the methods in this service. If there is an + // exact match for a method (i.e. /service/method) in the map, use the + // corresponding MethodConfig. If there's no exact match, look for the + // default config for the service (/service/) and use the corresponding + // MethodConfig if it exists. Otherwise, the method has no MethodConfig to + // use. + Methods map[string]MethodConfig + + // If a retryThrottlingPolicy is provided, gRPC will automatically throttle + // retry attempts and hedged RPCs when the client’s ratio of failures to + // successes exceeds a threshold. + // + // For each server name, the gRPC client will maintain a token_count which is + // initially set to maxTokens, and can take values between 0 and maxTokens. + // + // Every outgoing RPC (regardless of service or method invoked) will change + // token_count as follows: + // + // - Every failed RPC will decrement the token_count by 1. + // - Every successful RPC will increment the token_count by tokenRatio. + // + // If token_count is less than or equal to maxTokens / 2, then RPCs will not + // be retried and hedged RPCs will not be sent. + retryThrottling *retryThrottlingPolicy + // healthCheckConfig must be set as one of the requirement to enable LB channel + // health check. + healthCheckConfig *healthCheckConfig + // rawJSONString stores service config json string that get parsed into + // this service config struct. + rawJSONString string +} + +// healthCheckConfig defines the go-native version of the LB channel health check config. +type healthCheckConfig struct { + // serviceName is the service name to use in the health-checking request. + ServiceName string +} + +type jsonRetryPolicy struct { + MaxAttempts int + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration + BackoffMultiplier float64 + RetryableStatusCodes []codes.Code +} + +// retryThrottlingPolicy defines the go-native version of the retry throttling +// policy defined by the service config here: +// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config +type retryThrottlingPolicy struct { + // The number of tokens starts at maxTokens. The token_count will always be + // between 0 and maxTokens. + // + // This field is required and must be greater than zero. + MaxTokens float64 + // The amount of tokens to add on each successful RPC. Typically this will + // be some number between 0 and 1, e.g., 0.1. + // + // This field is required and must be greater than zero. Up to 3 decimal + // places are supported. + TokenRatio float64 +} + +type jsonName struct { + Service string + Method string +} + +var ( + errDuplicatedName = errors.New("duplicated name") + errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'") +) + +func (j jsonName) generatePath() (string, error) { + if j.Service == "" { + if j.Method != "" { + return "", errEmptyServiceNonEmptyMethod + } + return "", nil + } + res := "/" + j.Service + "/" + if j.Method != "" { + res += j.Method + } + return res, nil +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
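+// jsonMC mirrors one entry of the service config's "methodConfig" JSON
+// array, e.g. (editor's sketch of the wire format):
+//
+//	{
+//	  "name": [{"service": "foo.Bar", "method": "Baz"}],
+//	  "waitForReady": true,
+//	  "timeout": "1.5s",
+//	  "maxRequestMessageBytes": 1024
+//	}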
+type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *internalserviceconfig.Duration + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 + RetryPolicy *jsonRetryPolicy +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonSC struct { + LoadBalancingPolicy *string + LoadBalancingConfig *internalserviceconfig.BalancerConfig + MethodConfig *[]jsonMC + RetryThrottling *retryThrottlingPolicy + HealthCheckConfig *healthCheckConfig +} + +func init() { + internal.ParseServiceConfig = parseServiceConfig +} +func parseServiceConfig(js string) *serviceconfig.ParseResult { + if len(js) == 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")} + } + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + retryThrottling: rsc.RetryThrottling, + healthCheckConfig: rsc.HealthCheckConfig, + rawJSONString: js, + } + if c := rsc.LoadBalancingConfig; c != nil { + sc.lbConfig = &lbConfig{ + name: c.Name, + cfg: c.Config, + } + } + + if rsc.MethodConfig == nil { + return &serviceconfig.ParseResult{Config: &sc} + } + + paths := map[string]struct{}{} + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: (*time.Duration)(m.Timeout), + } + if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) + return &serviceconfig.ParseResult{Err: err} + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for i, n := range *m.Name { + path, err := n.generatePath() + if err != nil { + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + + if _, ok := paths[path]; ok { + err = errDuplicatedName + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) + return &serviceconfig.ParseResult{Err: err} + } + paths[path] = struct{}{} + sc.Methods[path] = mc + } + } + + if sc.retryThrottling != nil { + if mt := sc.retryThrottling.MaxTokens; mt <= 0 || mt > 1000 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: maxTokens (%v) out of range (0, 1000]", mt)} + } + if tr := sc.retryThrottling.TokenRatio; tr <= 0 { + return &serviceconfig.ParseResult{Err: fmt.Errorf("invalid retry throttling config: tokenRatio (%v) may not be negative", tr)} + } + } + return &serviceconfig.ParseResult{Config: &sc} +} + +func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) { + if jrp == nil { + return nil, nil + } + + if jrp.MaxAttempts <= 1 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || + jrp.BackoffMultiplier <= 0 || + len(jrp.RetryableStatusCodes) == 0 { + logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) + return nil, 
nil + } + + rp := &internalserviceconfig.RetryPolicy{ + MaxAttempts: jrp.MaxAttempts, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), + BackoffMultiplier: jrp.BackoffMultiplier, + RetryableStatusCodes: make(map[codes.Code]bool), + } + if rp.MaxAttempts > 5 { + // TODO(retry): Make the max maxAttempts configurable. + rp.MaxAttempts = 5 + } + for _, code := range jrp.RetryableStatusCodes { + rp.RetryableStatusCodes[code] = true + } + return rp, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} + +func init() { + internal.EqualServiceConfigForTesting = equalServiceConfig +} + +// equalServiceConfig compares two configs. The rawJSONString field is ignored, +// because they may diff in white spaces. +// +// If any of them is NOT *ServiceConfig, return false. +func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } + aa, ok := a.(*ServiceConfig) + if !ok { + return false + } + bb, ok := b.(*ServiceConfig) + if !ok { + return false + } + aaRaw := aa.rawJSONString + aa.rawJSONString = "" + bbRaw := bb.rawJSONString + bb.rawJSONString = "" + defer func() { + aa.rawJSONString = aaRaw + bb.rawJSONString = bbRaw + }() + // Using reflect.DeepEqual instead of cmp.Equal because many balancer + // configs are unexported, and cmp.Equal cannot compare unexported fields + // from unexported structs. + return reflect.DeepEqual(aa, bb) +} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go new file mode 100644 index 0000000000..35e7a20a04 --- /dev/null +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -0,0 +1,44 @@ +/* + * + * Copyright 2019 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package serviceconfig defines types and methods for operating on gRPC +// service configs. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package serviceconfig + +// Config represents an opaque data structure holding a service config. +type Config interface { + isServiceConfig() +} + +// LoadBalancingConfig represents an opaque data structure holding a load +// balancing config. +type LoadBalancingConfig interface { + isLoadBalancingConfig() +} + +// ParseResult contains a service config or an error. Exactly one must be +// non-nil. 
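+//
+// Consumers check Err before using Config, e.g. (editor's sketch; pr is a
+// hypothetical *ParseResult):
+//
+//	if pr.Err != nil {
+//		return pr.Err // invalid service config
+//	}
+//	sc := pr.Config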
+type ParseResult struct { + Config Config + Err error +} diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 0000000000..48a64cfe8e --- /dev/null +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import "sync" + +// SharedBufferPool is a pool of buffers that can be shared, resulting in +// decreased memory allocation. Currently, in gRPC-go, it is only utilized +// for parsing incoming messages. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +type SharedBufferPool interface { + // Get returns a buffer with specified length from the pool. + // + // The returned byte slice may be not zero initialized. + Get(length int) []byte + + // Put returns a buffer to the pool. + Put(*[]byte) +} + +// NewSharedBufferPool creates a simple SharedBufferPool with buckets +// of different sizes to optimize memory usage. This prevents the pool from +// wasting large amounts of memory, even when handling messages of varying sizes. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func NewSharedBufferPool() SharedBufferPool { + return &simpleSharedBufferPool{ + pools: [poolArraySize]simpleSharedBufferChildPool{ + newBytesPool(level0PoolMaxSize), + newBytesPool(level1PoolMaxSize), + newBytesPool(level2PoolMaxSize), + newBytesPool(level3PoolMaxSize), + newBytesPool(level4PoolMaxSize), + newBytesPool(0), + }, + } +} + +// simpleSharedBufferPool is a simple implementation of SharedBufferPool. 
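+// It routes each Get and Put to one of six sync.Pool-backed buckets chosen by
+// buffer capacity (16 B, 256 B, 4 KB, 64 KB, 1 MB, plus a catch-all for
+// larger sizes); Get(100), for example, is served from the 256 B bucket.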
+type simpleSharedBufferPool struct { + pools [poolArraySize]simpleSharedBufferChildPool +} + +func (p *simpleSharedBufferPool) Get(size int) []byte { + return p.pools[p.poolIdx(size)].Get(size) +} + +func (p *simpleSharedBufferPool) Put(bs *[]byte) { + p.pools[p.poolIdx(cap(*bs))].Put(bs) +} + +func (p *simpleSharedBufferPool) poolIdx(size int) int { + switch { + case size <= level0PoolMaxSize: + return level0PoolIdx + case size <= level1PoolMaxSize: + return level1PoolIdx + case size <= level2PoolMaxSize: + return level2PoolIdx + case size <= level3PoolMaxSize: + return level3PoolIdx + case size <= level4PoolMaxSize: + return level4PoolIdx + default: + return levelMaxPoolIdx + } +} + +const ( + level0PoolMaxSize = 16 // 16 B + level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B + level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB + level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB + level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB +) + +const ( + level0PoolIdx = iota + level1PoolIdx + level2PoolIdx + level3PoolIdx + level4PoolIdx + levelMaxPoolIdx + poolArraySize +) + +type simpleSharedBufferChildPool interface { + Get(size int) []byte + Put(any) +} + +type bufferPool struct { + sync.Pool + + defaultSize int +} + +func (p *bufferPool) Get(size int) []byte { + bs := p.Pool.Get().(*[]byte) + + if cap(*bs) < size { + p.Pool.Put(bs) + + return make([]byte, size) + } + + return (*bs)[:size] +} + +func newBytesPool(size int) simpleSharedBufferChildPool { + return &bufferPool{ + Pool: sync.Pool{ + New: func() any { + bs := make([]byte, size) + return &bs + }, + }, + defaultSize: size, + } +} + +// nopBufferPool is a buffer pool just makes new buffer without pooling. +type nopBufferPool struct { +} + +func (nopBufferPool) Get(length int) []byte { + return make([]byte, length) +} + +func (nopBufferPool) Put(*[]byte) { +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 0000000000..dc03731e45 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "context" + "net" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). 
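+//
+// A minimal no-op implementation (editor's sketch):
+//
+//	type noopHandler struct{}
+//
+//	func (noopHandler) TagRPC(ctx context.Context, _ *RPCTagInfo) context.Context {
+//		return ctx
+//	}
+//	func (noopHandler) HandleRPC(context.Context, RPCStats) {}
+//	func (noopHandler) TagConn(ctx context.Context, _ *ConnTagInfo) context.Context {
+//		return ctx
+//	}
+//	func (noopHandler) HandleConn(context.Context, ConnStats) {}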
+type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 0000000000..4ab70e2d46 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,343 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "context" + "net" + "time" + + "google.golang.org/grpc/metadata" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC attempt begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC attempt begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool + // IsTransparentRetryAttempt indicates whether this attempt was initiated + // due to transparently retrying a previous attempt. + IsTransparentRetryAttempt bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. 
+func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload any + // Data is the serialized message payload. + Data []byte + + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). + Length int + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. + WireLength int + + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. +func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata received. + Header metadata.MD + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this InTrailer is from the client side. + Trailer metadata.MD +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload any + // Data is the serialized message payload. + Data []byte + // Length is the size of the uncompressed payload data. Does not include any + // framing (gRPC or HTTP/2). + Length int + // CompressedLength is the size of the compressed payload data. Does not + // include any framing (gRPC or HTTP/2). Same as Length if compression not + // enabled. + CompressedLength int + // WireLength is the size of the compressed payload data plus gRPC framing. + // Does not include HTTP/2 framing. + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. 
+type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + // Compression is the compression algorithm used for the RPC. + Compression string + // Header contains the header metadata sent. + Header metadata.MD + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + // + // Deprecated: This field is never set. The length is not known when this message is + // emitted because the trailer fields are compressed with hpack after that. + WireLength int + // Trailer contains the trailer metadata sent to the client. This + // field is only valid if this OutTrailer is from the server side. + Trailer metadata.MD +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Trailer contains the trailer metadata received from the server. This + // field is only valid if this End is from the client side. + // Deprecated: use Trailer in InTrailer instead. + Trailer metadata.MD + // Error is the error the RPC ended with. It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. 
New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 0000000000..a93360efb8 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,162 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "context" + "errors" + "fmt" + + spb "google.golang.org/genproto/googleapis/rpc/status" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/status" +) + +// Status references google.golang.org/grpc/internal/status. It represents an +// RPC status code, message, and details. It is immutable and should be +// created with New, Newf, or FromProto. +// https://godoc.org/google.golang.org/grpc/internal/status +type Status = status.Status + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return status.New(c, msg) +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...any) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...any) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return status.FromProto(s) +} + +// FromError returns a Status representation of err. +// +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type +// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped +// errors, the message returned contains the entire err.Error() text and not +// just the wrapped status. In that case, ok is true. +// +// - If err is nil, a Status is returned with codes.OK and no message, and ok +// is true. +// +// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()` +// returns nil (which maps to Codes.OK), or if err wraps a type +// satisfying this, a Status is returned with codes.Unknown and err's +// Error() message, and ok is false. +// +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. 
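+//
+// For example (editor's sketch):
+//
+//	err := fmt.Errorf("lookup failed: %w", status.Error(codes.NotFound, "missing"))
+//	s, ok := status.FromError(err)
+//	// ok == true, s.Code() == codes.NotFound, and s.Message() contains the
+//	// full wrapped text "lookup failed: rpc error: ...".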
+func FromError(err error) (s *Status, ok bool) { + if err == nil { + return nil, true + } + type grpcstatus interface{ GRPCStatus() *Status } + if gs, ok := err.(grpcstatus); ok { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error has status nil, which maps to codes.OK. There + // is no sensible behavior for this, so we turn it into + // an error with codes.Unknown and discard the existing + // status. + return New(codes.Unknown, err.Error()), false + } + return grpcStatus, true + } + var gs grpcstatus + if errors.As(err, &gs) { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { + // Error wraps an error that has status nil, which maps + // to codes.OK. There is no sensible behavior for this, + // so we turn it into an error with codes.Unknown and + // discard the existing status. + return New(codes.Unknown, err.Error()), false + } + p := grpcStatus.Proto() + p.Message = err.Error() + return status.FromProto(p), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// Code returns the Code of the error if it is a Status error or if it wraps a +// Status error. If that is not the case, it returns codes.OK if err is nil, or +// codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. + if err == nil { + return codes.OK + } + + return Convert(err).Code() +} + +// FromContextError converts a context error or wrapped context error into a +// Status. It returns a Status with codes.OK if err is nil, or a Status with +// codes.Unknown if err is non-nil and not a context error. +func FromContextError(err error) *Status { + if err == nil { + return nil + } + if errors.Is(err, context.DeadlineExceeded) { + return New(codes.DeadlineExceeded, err.Error()) + } + if errors.Is(err, context.Canceled) { + return New(codes.Canceled, err.Error()) + } + return New(codes.Unknown, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 0000000000..b14b2fbea2 --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,1780 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "context" + "errors" + "io" + "math" + "strconv" + "sync" + "time" + + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancerload" + "google.golang.org/grpc/internal/binarylog" + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" + iresolver "google.golang.org/grpc/internal/resolver" + "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. +type StreamHandler func(srv any, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. Used +// on the server when registering services and on the client when initiating +// new streams. +type StreamDesc struct { + // StreamName and Handler are only used when registering handlers on a + // server. + StreamName string // the name of the method excluding the service + Handler StreamHandler // the handler called for the method + + // ServerStreams and ClientStreams are used for registering handlers on a + // server as well as defining RPC behavior when passed to NewClientStream + // and ClientConn.NewStream. At least one must be true. + ServerStreams bool // indicates the server can perform streaming sends + ClientStreams bool // indicates the client can perform streaming sends +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// Deprecated: See ClientStream and ServerStream documentation instead. +type Stream interface { + // Deprecated: See ClientStream and ServerStream documentation instead. + Context() context.Context + // Deprecated: See ClientStream and ServerStream documentation instead. + SendMsg(m any) error + // Deprecated: See ClientStream and ServerStream documentation instead. + RecvMsg(m any) error +} + +// ClientStream defines the client-side behavior of a streaming RPC. +// +// All errors returned from ClientStream methods are compatible with the +// status package. +type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. It is also not safe to call CloseSend + // concurrently with SendMsg. 
+ CloseSend() error + // Context returns the context for this stream. + // + // It should not be called until after Header or RecvMsg has returned. Once + // called, subsequent client-side retries are disabled. + Context() context.Context + // SendMsg is generally called by generated code. On error, SendMsg aborts + // the stream. If the error was generated by the client, the status is + // returned directly; otherwise, io.EOF is returned and the status of + // the stream may be discovered using RecvMsg. + // + // SendMsg blocks until: + // - There is sufficient flow control to schedule m with the transport, or + // - The stream is done, or + // - The stream breaks. + // + // SendMsg does not wait until the message is received by the server. An + // untimely stream closure may result in lost messages. To ensure delivery, + // users should ensure the RPC completed successfully using RecvMsg. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not safe + // to call SendMsg on the same stream in different goroutines. It is also + // not safe to call CloseSend concurrently with SendMsg. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. + SendMsg(m any) error + // RecvMsg blocks until it receives a message into m or the stream is + // done. It returns io.EOF when the stream completes successfully. On + // any other error, the stream is aborted and the error contains the RPC + // status. + // + // It is safe to have a goroutine calling SendMsg and another goroutine + // calling RecvMsg on the same stream at the same time, but it is not + // safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m any) error +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. ctx is used for the lifetime of the stream. +// +// To ensure resources are not leaked due to the stream returned, one of the following +// actions must be performed: +// +// 1. Call Close on the ClientConn. +// 2. Cancel the context provided. +// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated +// client-streaming RPC, for instance, might use the helper function +// CloseAndRecv (note that CloseSend does not Recv, therefore is not +// guaranteed to release all resources). +// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg. +// +// If none of the above happen, a goroutine and a context will be leaked, and grpc +// will not call the optionally-configured stats handler with a stats.End message. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream is a wrapper for ClientConn.NewStream. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) 
+} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + // validate md + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + // validate added + for _, kvs := range added { + for i := 0; i < len(kvs); i += 2 { + if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } + } + } + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + // Provide an opportunity for the first RPC to see the first service config + // provided by the resolver. + if err := cc.waitForResolvedAddrs(ctx); err != nil { + return nil, err + } + + var mc serviceconfig.MethodConfig + var onCommit func() + var newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, opts...) + } + + rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} + rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) + if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } + return nil, toRPCErr(err) + } + + if rpcConfig != nil { + if rpcConfig.Context != nil { + ctx = rpcConfig.Context + } + mc = rpcConfig.MethodConfig + onCommit = rpcConfig.OnCommitted + if rpcConfig.Interceptor != nil { + rpcInfo.Context = nil + ns := newStream + newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) { + cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns) + if err != nil { + return nil, toRPCErr(err) + } + return cs, nil + } + } + } + + return newStream(ctx, func() {}) +} + +func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { + c := defaultCallInfo() + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. 
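+ // A timeout from the method config, when present, bounds the whole RPC;
+ // otherwise the child context lives until it is explicitly canceled.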
+ var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + DoneFunc: doneFunc, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + cs := &clientStream{ + callHdr: callHdr, + ctx: ctx, + methodConfig: &mc, + opts: opts, + callInfo: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + firstAttempt: true, + onCommit: onCommit, + } + if !cc.dopts.disableRetry { + cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler) + } + if ml := binarylog.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + if cc.dopts.binaryLogger != nil { + if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil { + cs.binlogs = append(cs.binlogs, ml) + } + } + + // Pick the transport to use and create a new stream on the transport. + // Assign cs.attempt upon success. + op := func(a *csAttempt) error { + if err := a.getTransport(); err != nil { + return err + } + if err := a.newStream(); err != nil { + return err + } + // Because this operation is always called either here (while creating + // the clientStream) or by the retry code while locked when replaying + // the operation, it is safe to access cs.attempt directly. + cs.attempt = a + return nil + } + if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil { + return nil, err + } + + if len(cs.binlogs) != 0 { + md, _ := metadata.FromOutgoingContext(ctx) + logEntry := &binarylog.ClientHeader{ + OnClientSide: true, + Header: md, + MethodName: method, + Authority: cs.cc.authority, + } + if deadline, ok := ctx.Deadline(); ok { + logEntry.Timeout = time.Until(deadline) + if logEntry.Timeout < 0 { + logEntry.Timeout = 0 + } + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } + + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. 
+		go func() {
+			select {
+			case <-cc.ctx.Done():
+				cs.finish(ErrClientConnClosing)
+			case <-ctx.Done():
+				cs.finish(toRPCErr(ctx.Err()))
+			}
+		}()
+	}
+	return cs, nil
+}
+
+// newAttemptLocked creates a new csAttempt without a transport or stream.
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) {
+	if err := cs.ctx.Err(); err != nil {
+		return nil, toRPCErr(err)
+	}
+	if err := cs.cc.ctx.Err(); err != nil {
+		return nil, ErrClientConnClosing
+	}
+
+	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp)
+	method := cs.callHdr.Method
+	var beginTime time.Time
+	shs := cs.cc.dopts.copts.StatsHandlers
+	for _, sh := range shs {
+		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast})
+		beginTime = time.Now()
+		begin := &stats.Begin{
+			Client:                    true,
+			BeginTime:                 beginTime,
+			FailFast:                  cs.callInfo.failFast,
+			IsClientStream:            cs.desc.ClientStreams,
+			IsServerStream:            cs.desc.ServerStreams,
+			IsTransparentRetryAttempt: isTransparent,
+		}
+		sh.HandleRPC(ctx, begin)
+	}
+
+	var trInfo *traceInfo
+	if EnableTracing {
+		trInfo = &traceInfo{
+			tr: trace.New("grpc.Sent."+methodFamily(method), method),
+			firstLine: firstLine{
+				client: true,
+			},
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			trInfo.firstLine.deadline = time.Until(deadline)
+		}
+		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		ctx = trace.NewContext(ctx, trInfo.tr)
+	}
+
+	if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata {
+		// Add extra metadata (metadata that will be added by transport) to context
+		// so the balancer can see them.
+		ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
+			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+		))
+	}
+
+	return &csAttempt{
+		ctx:           ctx,
+		beginTime:     beginTime,
+		cs:            cs,
+		dc:            cs.cc.dopts.dc,
+		statsHandlers: shs,
+		trInfo:        trInfo,
+	}, nil
+}
+
+func (a *csAttempt) getTransport() error {
+	cs := a.cs
+
+	var err error
+	a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+	if err != nil {
+		if de, ok := err.(dropError); ok {
+			err = de.error
+			a.drop = true
+		}
+		return err
+	}
+	if a.trInfo != nil {
+		a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr())
+	}
+	return nil
+}
+
+func (a *csAttempt) newStream() error {
+	cs := a.cs
+	cs.callHdr.PreviousAttempts = cs.numRetries
+
+	// Merge metadata stored in PickResult, if any, with existing call metadata.
+	// It is safe to overwrite the csAttempt's context here, since all state
+	// maintained in it is local to the attempt. When the attempt has to be
+	// retried, a new instance of csAttempt will be created.
+	if a.pickResult.Metadata != nil {
+		// We currently do not have a function in the metadata package which
+		// merges given metadata with existing metadata in a context. The existing
+		// function `AppendToOutgoingContext()` takes a variadic argument of key
+		// value pairs.
+		//
+		// TODO: Make it possible to retrieve key value pairs from metadata.MD
+		// in a form passable to AppendToOutgoingContext(), or create a version
+		// of AppendToOutgoingContext() that accepts a metadata.MD.
+		md, _ := metadata.FromOutgoingContext(a.ctx)
+		md = metadata.Join(md, a.pickResult.Metadata)
+		a.ctx = metadata.NewOutgoingContext(a.ctx, md)
+	}
+
+	s, err := a.t.NewStream(a.ctx, cs.callHdr)
+	if err != nil {
+		nse, ok := err.(*transport.NewStreamError)
+		if !ok {
+			// Unexpected.
+ return err + } + + if nse.AllowTransparentRetry { + a.allowTransparentRetry = true + } + + // Unwrap and convert error. + return toRPCErr(nse.Err) + } + a.s = s + a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool} + return nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + callHdr *transport.CallHdr + opts []CallOption + callInfo *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + + methodConfig *MethodConfig + + ctx context.Context // the application's context, wrapped by stats/tracing + + retryThrottler *retryThrottler // The throttler active when the RPC began. + + binlogs []binarylog.MethodLogger + // serverHeaderBinlogged is a boolean for whether server header has been + // logged. Server header will be logged when the first time one of those + // happens: stream.Header(), stream.Recv(). + // + // It's only read and used by Recv() and Header(), so it doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex + firstAttempt bool // if true, transparent retry is valid + numRetries int // exclusive of transparent retry attempt(s) + numRetriesSincePushback int // retries since pushback; to reset backoff + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + // attempt is the active client stream attempt. + // The only place where it is written is the newAttemptLocked method and this method never writes nil. + // So, attempt can be nil only inside newClientStream function when clientStream is first created. + // One of the first things done after clientStream's creation, is to call newAttemptLocked which either + // assigns a non nil value to the attempt or returns an error. If an error is returned from newAttemptLocked, + // then newClientStream calls finish on the clientStream and returns. So, finish method is the only + // place where we need to check if the attempt is nil. + attempt *csAttempt + // TODO(hedging): hedging will have multiple attempts simultaneously. + committed bool // active attempt committed for retry? + onCommit func() + buffer []func(a *csAttempt) error // operations to replay on retry + bufferSize int // current size of buffer +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult + + finished bool + dc Decompressor + decomp encoding.Compressor + decompSet bool + + mu sync.Mutex // guards trInfo.tr + // trInfo may be nil (if EnableTracing is false). + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo *traceInfo + + statsHandlers []stats.Handler + beginTime time.Time + + // set for newStream errors that may be transparently retried + allowTransparentRetry bool + // set for pick errors that are returned as a status + drop bool +} + +func (cs *clientStream) commitAttemptLocked() { + if !cs.committed && cs.onCommit != nil { + cs.onCommit() + } + cs.committed = true + cs.buffer = nil +} + +func (cs *clientStream) commitAttempt() { + cs.mu.Lock() + cs.commitAttemptLocked() + cs.mu.Unlock() +} + +// shouldRetry returns nil if the RPC should be retried; otherwise it returns +// the error that should be returned by the operation. 
If the RPC should be +// retried, the bool indicates whether it is being retried transparently. +func (a *csAttempt) shouldRetry(err error) (bool, error) { + cs := a.cs + + if cs.finished || cs.committed || a.drop { + // RPC is finished or committed or was dropped by the picker; cannot retry. + return false, err + } + if a.s == nil && a.allowTransparentRetry { + return true, nil + } + // Wait for the trailers. + unprocessed := false + if a.s != nil { + <-a.s.Done() + unprocessed = a.s.Unprocessed() + } + if cs.firstAttempt && unprocessed { + // First attempt, stream unprocessed: transparently retry. + return true, nil + } + if cs.cc.dopts.disableRetry { + return false, err + } + + pushback := 0 + hasPushback := false + if a.s != nil { + if !a.s.TrailersOnly() { + return false, err + } + + // TODO(retry): Move down if the spec changes to not check server pushback + // before considering this a failure for throttling. + sps := a.s.Trailer()["grpc-retry-pushback-ms"] + if len(sps) == 1 { + var e error + if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { + channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return false, err + } + hasPushback = true + } else if len(sps) > 1 { + channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + cs.retryThrottler.throttle() // This counts as a failure for throttling. + return false, err + } + } + + var code codes.Code + if a.s != nil { + code = a.s.Status().Code() + } else { + code = status.Code(err) + } + + rp := cs.methodConfig.RetryPolicy + if rp == nil || !rp.RetryableStatusCodes[code] { + return false, err + } + + // Note: the ordering here is important; we count this as a failure + // only if the code matched a retryable code. + if cs.retryThrottler.throttle() { + return false, err + } + if cs.numRetries+1 >= rp.MaxAttempts { + return false, err + } + + var dur time.Duration + if hasPushback { + dur = time.Millisecond * time.Duration(pushback) + cs.numRetriesSincePushback = 0 + } else { + fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback)) + cur := float64(rp.InitialBackoff) * fact + if max := float64(rp.MaxBackoff); cur > max { + cur = max + } + dur = time.Duration(grpcrand.Int63n(int64(cur))) + cs.numRetriesSincePushback++ + } + + // TODO(dfawley): we could eagerly fail here if dur puts us past the + // deadline, but unsure if it is worth doing. + t := time.NewTimer(dur) + select { + case <-t.C: + cs.numRetries++ + return false, nil + case <-cs.ctx.Done(): + t.Stop() + return false, status.FromContextError(cs.ctx.Err()).Err() + } +} + +// Returns nil if a retry was performed and succeeded; error otherwise. +func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error { + for { + attempt.finish(toRPCErr(lastErr)) + isTransparent, err := attempt.shouldRetry(lastErr) + if err != nil { + cs.commitAttemptLocked() + return err + } + cs.firstAttempt = false + attempt, err = cs.newAttemptLocked(isTransparent) + if err != nil { + // Only returns error if the clientconn is closed or the context of + // the stream is canceled. + return err + } + // Note that the first op in the replay buffer always sets cs.attempt + // if it is able to pick a transport and create a stream. 
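+		// (As a sketch for a retried unary RPC, the buffer typically replays,
+		// in order: the op that picks a transport and creates the stream, the
+		// SendMsg op, and the CloseSend op. RecvMsg is never buffered, since a
+		// successful RecvMsg commits the attempt; see bufferForRetryLocked.)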
+		if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
+			return nil
+		}
+	}
+}
+
+func (cs *clientStream) Context() context.Context {
+	cs.commitAttempt()
+	// No need to lock before using attempt, since we know it is committed and
+	// cannot change.
+	if cs.attempt.s != nil {
+		return cs.attempt.s.Context()
+	}
+	return cs.ctx
+}
+
+func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
+	cs.mu.Lock()
+	for {
+		if cs.committed {
+			cs.mu.Unlock()
+			// toRPCErr is used in case the error from the attempt comes from
+			// NewClientStream, which intentionally doesn't return a status
+			// error to allow for further inspection; all other errors should
+			// already be status errors.
+			return toRPCErr(op(cs.attempt))
+		}
+		if len(cs.buffer) == 0 {
+			// For the first op, which controls creation of the stream and
+			// assigns cs.attempt, we need to create a new attempt inline
+			// before executing the first op.  On subsequent ops, the attempt
+			// is created immediately before replaying the ops.
+			var err error
+			if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil {
+				cs.mu.Unlock()
+				cs.finish(err)
+				return err
+			}
+		}
+		a := cs.attempt
+		cs.mu.Unlock()
+		err := op(a)
+		cs.mu.Lock()
+		if a != cs.attempt {
+			// We started another attempt already.
+			continue
+		}
+		if err == io.EOF {
+			<-a.s.Done()
+		}
+		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+			onSuccess()
+			cs.mu.Unlock()
+			return err
+		}
+		if err := cs.retryLocked(a, err); err != nil {
+			cs.mu.Unlock()
+			return err
+		}
+	}
+}
+
+func (cs *clientStream) Header() (metadata.MD, error) {
+	var m metadata.MD
+	err := cs.withRetry(func(a *csAttempt) error {
+		var err error
+		m, err = a.s.Header()
+		return toRPCErr(err)
+	}, cs.commitAttemptLocked)
+
+	if m == nil && err == nil {
+		// The stream ended with success.  Finish the clientStream.
+		err = io.EOF
+	}
+
+	if err != nil {
+		cs.finish(err)
+		// Do not return the error.  The user should get it by calling Recv().
+		return nil, nil
+	}
+
+	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil {
+		// Only log if binary log is on and header has not been logged, and
+		// there are actually headers to log.
+		logEntry := &binarylog.ServerHeader{
+			OnClientSide: true,
+			Header:       m,
+			PeerAddr:     nil,
+		}
+		if peer, ok := peer.FromContext(cs.Context()); ok {
+			logEntry.PeerAddr = peer.Addr
+		}
+		cs.serverHeaderBinlogged = true
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cs.ctx, logEntry)
+		}
+	}
+
+	return m, nil
+}
+
+func (cs *clientStream) Trailer() metadata.MD {
+	// On RPC failure, we never need to retry, because valid usage requires
+	// that RecvMsg() has returned a non-nil error before this function is
+	// called. We would have retried earlier if necessary.
+	//
+	// Commit the attempt anyway, just in case users are not following those
+	// directions -- it will prevent races and should not meaningfully impact
+	// performance.
+	cs.commitAttempt()
+	if cs.attempt.s == nil {
+		return nil
+	}
+	return cs.attempt.s.Trailer()
+}
+
+func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
+	for _, f := range cs.buffer {
+		if err := f(attempt); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+	// Note: we will still buffer if retry is disabled (for transparent retries).
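+	// (Illustratively, assuming the documented 256KB default for the
+	// MaxRetryRPCBufferSize CallOption: a single 1MB SendMsg overflows the
+	// check below, commits the current attempt, and disables later retries.)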
+ if cs.committed { + return + } + cs.bufferSize += sz + if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize { + cs.commitAttemptLocked() + return + } + cs.buffer = append(cs.buffer, op) +} + +func (cs *clientStream) SendMsg(m any) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + cs.finish(err) + } + }() + if cs.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > *cs.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize) + } + op := func(a *csAttempt) error { + return a.sendMsg(m, hdr, payload, data) + } + err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) }) + if len(cs.binlogs) != 0 && err == nil { + cm := &binarylog.ClientMessage{ + OnClientSide: true, + Message: data, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, cm) + } + } + return err +} + +func (cs *clientStream) RecvMsg(m any) error { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { + // Call Header() to binary log header if it's not already logged. + cs.Header() + } + var recvInfo *payloadInfo + if len(cs.binlogs) != 0 { + recvInfo = &payloadInfo{} + } + err := cs.withRetry(func(a *csAttempt) error { + return a.recvMsg(m, recvInfo) + }, cs.commitAttemptLocked) + if len(cs.binlogs) != 0 && err == nil { + sm := &binarylog.ServerMessage{ + OnClientSide: true, + Message: recvInfo.uncompressedBytes, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, sm) + } + } + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + } + return err +} + +func (cs *clientStream) CloseSend() error { + if cs.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? + return nil + } + cs.sentLast = true + op := func(a *csAttempt) error { + a.t.Write(a.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil + } + cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }) + if len(cs.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, chc) + } + } + // We never returned an error here for reasons. + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. 
+ err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + for _, onFinish := range cs.callInfo.onFinish { + onFinish(err) + } + cs.commitAttemptLocked() + if cs.attempt != nil { + cs.attempt.finish(err) + // after functions all rely upon having a stream. + if cs.attempt.s != nil { + for _, o := range cs.opts { + o.after(cs.callInfo, cs.attempt) + } + } + } + + cs.mu.Unlock() + // Only one of cancel or trailer needs to be logged. + if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } + } + } + if err == nil { + cs.retryThrottler.successfulRPC() + } + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + cs.cancel() +} + +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { + cs := a.cs + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil { + if !cs.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now())) + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil +} + +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { + cs := a.cs + if len(a.statsHandlers) != 0 && payInfo == nil { + payInfo = &payloadInfo{} + } + + if !a.decompSet { + // Block until we receive headers containing received message encoding. + if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp) + if err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + + return toRPCErr(err) + } + if a.trInfo != nil { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + for _, sh := range a.statsHandlers { + sh.HandleRPC(a.ctx, &stats.InPayload{ + Client: true, + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. 
+			Data:             payInfo.uncompressedBytes,
+			WireLength:       payInfo.compressedLength + headerLen,
+			CompressedLength: payInfo.compressedLength,
+			Length:           len(payInfo.uncompressedBytes),
+		})
+	}
+	if channelz.IsOn() {
+		a.t.IncrMsgRecv()
+	}
+	if cs.desc.ServerStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
+		return nil
+	}
+	// Special handling for non-server-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
+	if err == nil {
+		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	}
+	if err == io.EOF {
+		return a.s.Status().Err() // non-server streaming Recv returns nil on success
+	}
+	return toRPCErr(err)
+}
+
+func (a *csAttempt) finish(err error) {
+	a.mu.Lock()
+	if a.finished {
+		a.mu.Unlock()
+		return
+	}
+	a.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	var tr metadata.MD
+	if a.s != nil {
+		a.t.CloseStream(a.s, err)
+		tr = a.s.Trailer()
+	}
+
+	if a.pickResult.Done != nil {
+		br := false
+		if a.s != nil {
+			br = a.s.BytesReceived()
+		}
+		a.pickResult.Done(balancer.DoneInfo{
+			Err:           err,
+			Trailer:       tr,
+			BytesSent:     a.s != nil,
+			BytesReceived: br,
+			ServerLoad:    balancerload.Parse(tr),
+		})
+	}
+	for _, sh := range a.statsHandlers {
+		end := &stats.End{
+			Client:    true,
+			BeginTime: a.beginTime,
+			EndTime:   time.Now(),
+			Trailer:   tr,
+			Error:     err,
+		}
+		sh.HandleRPC(a.ctx, end)
+	}
+	if a.trInfo != nil && a.trInfo.tr != nil {
+		if err == nil {
+			a.trInfo.tr.LazyPrintf("RPC: [OK]")
+		} else {
+			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+			a.trInfo.tr.SetError()
+		}
+		a.trInfo.tr.Finish()
+		a.trInfo.tr = nil
+	}
+	a.mu.Unlock()
+}
+
+// newNonRetryClientStream creates a ClientStream with the specified transport,
+// on the given addrConn.
+//
+// It's expected that the given transport is either the same one in addrConn, or
+// is already closed. To avoid race, transport is specified separately, instead
+// of using ac.transport.
+//
+// Main differences between this and ClientConn.NewStream:
+// - no retry
+// - no service config (or wait for service config)
+// - no tracing or stats
+func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method string, t transport.ClientTransport, ac *addrConn, opts ...CallOption) (_ ClientStream, err error) {
+	if t == nil {
+		// TODO: return RPC error here?
+		return nil, errors.New("transport provided is nil")
+	}
+	// defaultCallInfo contains unnecessary info (i.e. failFast, maxRetryRPCBufferSize), so we just initialize an empty struct.
+	c := &callInfo{}
+
+	// Possible context leak:
+	// The cancel function for the child context we create will only be called
+	// when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
+	// an error is generated by SendMsg.
+	// https://github.com/grpc/grpc-go/issues/1818.
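+	// (A caller-side mitigation sketch: pairing the context passed in here
+	// with the caller's own deferred cancel guarantees cleanup even if the
+	// stream is abandoned early:
+	//
+	//	ctx, cancel := context.WithCancel(context.Background())
+	//	defer cancel()
+	//
+	// This complements, but does not replace, the cancel wiring below.)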
+ ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxReceiveMessageSize = getMaxSize(nil, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + c.maxSendMessageSize = getMaxSize(nil, c.maxSendMessageSize, defaultServerMaxSendMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: ac.cc.authority, + Method: method, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if ac.cc.dopts.cp != nil { + callHdr.SendCompress = ac.cc.dopts.cp.Type() + cp = ac.cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + + // Use a special addrConnStream to avoid retry. + as := &addrConnStream{ + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + t: t, + } + + s, err := as.t.NewStream(as.ctx, as.callHdr) + if err != nil { + err = toRPCErr(err) + return nil, err + } + as.s = s + as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool} + ac.incrCallsStarted() + if desc != unaryStreamDesc { + // Listen on stream context to cleanup when the stream context is + // canceled. Also listen for the addrConn's context in case the + // addrConn is closed or reconnects to a different address. In all + // other cases, an error should already be injected into the recv + // buffer by the transport, which the client will eventually receive, + // and then we will cancel the stream's context in + // addrConnStream.finish. + go func() { + ac.mu.Lock() + acCtx := ac.ctx + ac.mu.Unlock() + select { + case <-acCtx.Done(): + as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing")) + case <-ctx.Done(): + as.finish(toRPCErr(ctx.Err())) + } + }() + } + return as, nil +} + +type addrConnStream struct { + s *transport.Stream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + t transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + cp Compressor + comp encoding.Compressor + decompSet bool + dc Decompressor + decomp encoding.Compressor + p *parser + mu sync.Mutex + finished bool +} + +func (as *addrConnStream) Header() (metadata.MD, error) { + m, err := as.s.Header() + if err != nil { + as.finish(toRPCErr(err)) + } + return m, err +} + +func (as *addrConnStream) Trailer() metadata.MD { + return as.s.Trailer() +} + +func (as *addrConnStream) CloseSend() error { + if as.sentLast { + // TODO: return an error and finish the stream instead, due to API misuse? 
+ return nil + } + as.sentLast = true + + as.t.Write(as.s, nil, nil, &transport.Options{Last: true}) + // Always return nil; io.EOF is the only error that might make sense + // instead, but there is no need to signal the client to call RecvMsg + // as the only use left for the stream after CloseSend is to call + // RecvMsg. This also matches historical behavior. + return nil +} + +func (as *addrConnStream) Context() context.Context { + return as.s.Context() +} + +func (as *addrConnStream) SendMsg(m any) (err error) { + defer func() { + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error in csAttempt.sendMsg; the real + // error will be returned from RecvMsg eventually in that case, or be + // retried.) + as.finish(err) + } + }() + if as.sentLast { + return status.Errorf(codes.Internal, "SendMsg called after CloseSend") + } + if !as.desc.ClientStreams { + as.sentLast = true + } + + // load hdr, payload, data + hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payld) > *as.callInfo.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize) + } + + if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil { + if !as.desc.ClientStreams { + // For non-client-streaming RPCs, we return nil instead of EOF on error + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. + return nil + } + return io.EOF + } + + if channelz.IsOn() { + as.t.IncrMsgSent() + } + return nil +} + +func (as *addrConnStream) RecvMsg(m any) (err error) { + defer func() { + if err != nil || !as.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + as.finish(err) + } + }() + + if !as.decompSet { + // Block until we receive headers containing received message encoding. + if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if as.dc == nil || as.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + as.dc = nil + as.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + as.dc = nil + } + // Only initialize this state once per stream. + as.decompSet = true + } + err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp) + if err != nil { + if err == io.EOF { + if statusErr := as.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + + if channelz.IsOn() { + as.t.IncrMsgRecv() + } + if as.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. 
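+	// (E.g. a misbehaving server that writes a second response message on a
+	// unary or client-streaming RPC makes the recv below yield a message
+	// instead of io.EOF, which is then reported as the protocol violation.)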
+	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
+	if err == nil {
+		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	}
+	if err == io.EOF {
+		return as.s.Status().Err() // non-server streaming Recv returns nil on success
+	}
+	return toRPCErr(err)
+}
+
+func (as *addrConnStream) finish(err error) {
+	as.mu.Lock()
+	if as.finished {
+		as.mu.Unlock()
+		return
+	}
+	as.finished = true
+	if err == io.EOF {
+		// Ending a stream with EOF indicates a success.
+		err = nil
+	}
+	if as.s != nil {
+		as.t.CloseStream(as.s, err)
+	}
+
+	if err != nil {
+		as.ac.incrCallsFailed()
+	} else {
+		as.ac.incrCallsSucceeded()
+	}
+	as.cancel()
+	as.mu.Unlock()
+}
+
+// ServerStream defines the server-side behavior of a streaming RPC.
+//
+// Errors returned from ServerStream methods are compatible with the status
+// package.  However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
+type ServerStream interface {
+	// SetHeader sets the header metadata. It may be called multiple times.
+	// When called multiple times, all the provided metadata will be merged.
+	// All the metadata will be sent out when one of the following happens:
+	//  - ServerStream.SendHeader() is called;
+	//  - The first response is sent out;
+	//  - An RPC status is sent out (error or success).
+	SetHeader(metadata.MD) error
+	// SendHeader sends the header metadata.
+	// The provided md and headers set by SetHeader() will be sent.
+	// It fails if called multiple times.
+	SendHeader(metadata.MD) error
+	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
+	// When called more than once, all the provided metadata will be merged.
+	SetTrailer(metadata.MD)
+	// Context returns the context for this stream.
+	Context() context.Context
+	// SendMsg sends a message. On error, SendMsg aborts the stream and the
+	// error is returned directly.
+	//
+	// SendMsg blocks until:
+	//  - There is sufficient flow control to schedule m with the transport, or
+	//  - The stream is done, or
+	//  - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the client. An
+	// untimely stream closure may result in lost messages.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines.
+	//
+	// It is not safe to modify the message after calling SendMsg. Tracing
+	// libraries and stats handlers may use the message lazily.
+	SendMsg(m any) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the client has performed a CloseSend. On
+	// any non-EOF error, the stream is aborted and the error contains the
+	// RPC status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m any) error
+}
+
+// serverStream implements a server side Stream.
+type serverStream struct { + ctx context.Context + t transport.ServerTransport + s *transport.Stream + p *parser + codec baseCodec + + cp Compressor + dc Decompressor + comp encoding.Compressor + decomp encoding.Compressor + + sendCompressorName string + + maxReceiveMessageSize int + maxSendMessageSize int + trInfo *traceInfo + + statsHandler []stats.Handler + + binlogs []binarylog.MethodLogger + // serverHeaderBinlogged indicates whether server header has been logged. It + // will happen when one of the following two happens: stream.SendHeader(), + // stream.Send(). + // + // It's only checked in send and sendHeader, doesn't need to be + // synchronized. + serverHeaderBinlogged bool + + mu sync.Mutex // protects trInfo.tr after the service handler runs. +} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) + if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } + } + return err +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + + // Server handler could have set new compressor by calling SetSendCompressor. + // In case it is set, we need to use it for compressing outbound message. + if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { + ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.sendCompressorName = sendCompressorsName + } + + // load hdr, payload, data + hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp) + if err != nil { + return err + } + + // TODO(dfawley): should we be checking len(data) instead? + if len(payload) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", len(payload), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if len(ss.binlogs) != 0 { + if !ss.serverHeaderBinlogged { + h, _ := ss.s.Header() + sh := &binarylog.ServerHeader{ + Header: h, + } + ss.serverHeaderBinlogged = true + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sh) + } + } + sm := &binarylog.ServerMessage{ + Message: data, + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, sm) + } + } + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now())) + } + } + return nil +} + +func (ss *serverStream) RecvMsg(m any) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + // Non-user specified status was sent out. This should be an error + // case (as a server side Cancel maybe). + // + // This is not handled specifically now. User will return a final + // status from the service handler, we will log that error instead. + // This behavior is similar to an interceptor. + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var payInfo *payloadInfo + if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 { + payInfo = &payloadInfo{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil { + if err == io.EOF { + if len(ss.binlogs) != 0 { + chc := &binarylog.ClientHalfClose{} + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, chc) + } + } + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if len(ss.statsHandler) != 0 { + for _, sh := range ss.statsHandler { + sh.HandleRPC(ss.s.Context(), &stats.InPayload{ + RecvTime: time.Now(), + Payload: m, + // TODO truncate large payload. + Data: payInfo.uncompressedBytes, + Length: len(payInfo.uncompressedBytes), + WireLength: payInfo.compressedLength + headerLen, + CompressedLength: payInfo.compressedLength, + }) + } + } + if len(ss.binlogs) != 0 { + cm := &binarylog.ClientMessage{ + Message: payInfo.uncompressedBytes, + } + for _, binlog := range ss.binlogs { + binlog.Log(ss.ctx, cm) + } + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} + +// prepareMsg returns the hdr, payload and data +// using the compressors passed or using the +// passed preparedmsg +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { + if preparedMsg, ok := m.(*PreparedMsg); ok { + return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil + } + // The input interface is not a prepared msg. 
+ // Marshal and Compress the data at this point + data, err = encode(codec, m) + if err != nil { + return nil, nil, nil, err + } + compData, err := compress(data, cp, comp) + if err != nil { + return nil, nil, nil, err + } + hdr, payload = msgHeader(data, compData) + return hdr, payload, data, nil +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 0000000000..bfa5dfa40e --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,56 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +package tap + +import ( + "context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is +// created on the server side. If it returns a non-nil error, the stream will +// not be created and an error will be returned to the client. If the error +// returned is a status error, that status code and message will be used, +// otherwise PermissionDenied will be the code and err.Error() will be the +// message. +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). For other general +// usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 0000000000..9ded79321b --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,123 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "sync" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". +func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +// It may be mutated after construction; remoteAddr specifically may change +// during client-side use. +type firstLine struct { + mu sync.Mutex + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) SetRemoteAddr(addr net.Addr) { + f.mu.Lock() + f.remoteAddr = addr + f.mu.Unlock() +} + +func (f *firstLine) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []any +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go new file mode 100644 index 0000000000..724ad21021 --- /dev/null +++ b/vendor/google.golang.org/grpc/version.go @@ -0,0 +1,22 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +// Version is the current grpc version. 
+const Version = "1.58.3" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100644 index 0000000000..bbc9e2e3c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,209 @@ +#!/bin/bash + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +# not makes sure the command passed to it does not exit with a return code of 0. +not() { + # This is required instead of the earlier (! $COMMAND) because subshells and + # pipefail don't work the same on Darwin as in Linux. + ! "$@" +} + +die() { + echo "$@" >&2 + exit 1 +} + +fail_on_output() { + tee /dev/stderr | not read +} + +# Check to make sure it's safe to modify the user's git repo. +git status --porcelain | fail_on_output + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +PATH="${HOME}/go/bin:${GOROOT}/bin:${PATH}" +go version + +if [[ "$1" = "-install" ]]; then + # Install the pinned versions as defined in module tools. + pushd ./test/tools + go install \ + golang.org/x/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell + popd + if [[ -z "${VET_SKIP_PROTO}" ]]; then + if [[ "${GITHUB_ACTIONS}" = "true" ]]; then + PROTOBUF_VERSION=22.0 # a.k.a v4.22.0 in pb.go files. + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/runner/go + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif not which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + make proto && git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then + exit 0 +fi + +# - Ensure all source files contain a copyright message. +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output + +# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. +not grep 'func Test[^(]' *_test.go +not grep 'func Test[^(]' test/*.go + +# - Do not import x/net/context. +not git grep -l 'x/net/context' -- "*.go" + +# - Do not import math/rand for real library code. Use internal/grpcrand for +# thread safety. +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' + +# - Do not use "interface{}"; use "any" instead. +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' + +# - Do not call grpclog directly. Use grpclog.Component instead. +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' + +# - Ensure all ptypes proto packages are renamed when importing. +not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" + +# - Ensure all xds proto imports are renamed to *pb or *grpc. +git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' + +misspell -error . 
+ +# - gofmt, goimports, golint (with exceptions for generated code), go vet, +# go mod tidy. +# Perform these checks on each module inside gRPC. +for MOD_FILE in $(find . -name 'go.mod'); do + MOD_DIR=$(dirname ${MOD_FILE}) + pushd ${MOD_DIR} + go vet -all ./... | fail_on_output + gofmt -s -d -l . 2>&1 | fail_on_output + goimports -l . 2>&1 | not grep -vE "\.pb\.go" + golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" + + go mod tidy -compat=1.19 + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) + popd +done + +# - Collection of static analysis checks +# +# TODO(dfawley): don't use deprecated functions in examples or first-party +# plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. +SC_OUT="$(mktemp)" +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true +# Error if anything other than deprecation warnings are printed. +not grep -v "is deprecated:.*SA1019" "${SC_OUT}" +# Only ignore the following deprecated types/fields/functions. +not grep -Fv '.CredsBundle +.HeaderMap +.Metadata is deprecated: use Attributes +.NewAddress +.NewServiceConfig +.Type is deprecated: use Attributes +BuildVersion is deprecated +balancer.ErrTransientFailure +balancer.Picker +extDesc.Filename is deprecated +github.com/golang/protobuf/jsonpb is deprecated +grpc.CallCustomCodec +grpc.Code +grpc.Compressor +grpc.CustomCodec +grpc.Decompressor +grpc.MaxMsgSize +grpc.MethodConfig +grpc.NewGZIPCompressor +grpc.NewGZIPDecompressor +grpc.RPCCompressor +grpc.RPCDecompressor +grpc.ServiceConfig +grpc.WithCompressor +grpc.WithDecompressor +grpc.WithDialer +grpc.WithMaxMsgSize +grpc.WithServiceConfig +grpc.WithTimeout +http.CloseNotifier +info.SecurityVersion +proto is deprecated +proto.InternalMessageInfo is deprecated +proto.EnumName is deprecated +proto.ErrInternalBadWireType is deprecated +proto.FileDescriptor is deprecated +proto.Marshaler is deprecated +proto.MessageType is deprecated +proto.RegisterEnum is deprecated +proto.RegisterFile is deprecated +proto.RegisterType is deprecated +proto.RegisterExtension is deprecated +proto.RegisteredExtension is deprecated +proto.RegisteredExtensions is deprecated +proto.RegisterMapType is deprecated +proto.Unmarshaler is deprecated +Target is deprecated: Use the Target field in the BuildOptions instead. +xxx_messageInfo_ +' "${SC_OUT}" + +# - special golint on package comments. +lint_package_comment_per_package() { + # Number of files in this go package. + fileCount=$(go list -f '{{len .GoFiles}}' $1) + if [ ${fileCount} -eq 0 ]; then + return 0 + fi + # Number of package errors generated by golint. + lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") + # golint complains about every file that's missing the package comment. If the + # number of files for this package is greater than the number of errors, there's + # at least one file with package comment, good. Otherwise, fail. 
+ if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then + echo "Package $1 (with ${fileCount} files) is missing package comment" + return 1 + fi +} +lint_package_comment() { + set +ex + + count=0 + for i in $(go list ./...); do + lint_package_comment_per_package "$i" + ((count += $?)) + done + + set -ex + return $count +} +lint_package_comment + +echo SUCCESS diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go new file mode 100644 index 0000000000..5f28148d80 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -0,0 +1,665 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given proto.Message. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable JSON format parser. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // If AllowPartial is set, input for messages that will result in missing + // required fields will not return an error. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given proto.Message +// using options in the UnmarshalOptions object. +// It will clear the message first before setting the fields. +// If it returns an error, the given message may be partially set. +// The provided message must be mutable (e.g., a non-nil pointer to a message). +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + return o.unmarshal(b, m) +} + +// unmarshal is a centralized function that all unmarshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for unmarshal that do not go through this. +func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{json.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + + // Check for EOF. 
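+	// (Illustratively, an input such as `{"name":"x"} {"name":"y"}` fails this
+	// check: the decoder must observe EOF right after the first top-level value.)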
+ tok, err := dec.Read() + if err != nil { + return err + } + if tok.Kind() != json.EOF { + return dec.unexpectedTokenError(tok) + } + + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *json.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok json.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals a message into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error { + if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil { + return unmarshal(d, m) + } + + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + return nil + case json.Name: + // Continue below. + } + + name := tok.Name() + // Unmarshaling a non-custom embedded message in Any will contain the + // JSON field "@type" which should be skipped because it is not a field + // of the embedded message, but simply an artifact of the Any format. + if skipTypeURL && name == "@type" { + d.Read() + continue + } + + // Get the FieldDescriptor. + var fd protoreflect.FieldDescriptor + if strings.HasPrefix(name, "[") && strings.HasSuffix(name, "]") { + // Only extension names are in [name] format. + extName := protoreflect.FullName(name[1 : len(name)-1]) + extType, err := d.opts.Resolver.FindExtensionByName(extName) + if err != nil && err != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve %s: %v", tok.RawString(), err) + } + if extType != nil { + fd = extType.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } + } else { + // The name can either be the JSON name or the proto field name. + fd = fieldDescs.ByJSONName(name) + if fd == nil { + fd = fieldDescs.ByTextName(name) + } + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + if fd == nil { + // Field is unknown. 
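+			// (For example, UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m)
+			// skips over such fields instead of returning the error below.)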
+ if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + + // Do not allow duplicate fields. + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "duplicate field %v", tok.RawString()) + } + seenNums.Set(num) + + // No need to set values for JSON null unless the field type is + // google.protobuf.Value or google.protobuf.NullValue. + if tok, _ := d.Peek(); tok.Kind() == json.Null && !isKnownValue(fd) && !isNullValue(fd) { + d.Read() + continue + } + + switch { + case fd.IsList(): + list := m.Mutable(fd).List() + if err := d.unmarshalList(list, fd); err != nil { + return err + } + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(mmap, fd); err != nil { + return err + } + default: + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %s, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + // Required or optional fields. + if err := d.unmarshalSingular(m, fd); err != nil { + return err + } + } + } +} + +func isKnownValue(fd protoreflect.FieldDescriptor) bool { + md := fd.Message() + return md != nil && md.FullName() == genid.Value_message_fullname +} + +func isNullValue(fd protoreflect.FieldDescriptor) bool { + ed := fd.Enum() + return ed != nil && ed.FullName() == genid.NullValue_enum_fullname +} + +// unmarshalSingular unmarshals to the non-repeated field specified +// by the given FieldDescriptor. +func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.FieldDescriptor) error { + var val protoreflect.Value + var err error + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), false) + default: + val, err = d.unmarshalScalar(fd) + } + + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// unmarshalScalar unmarshals to a scalar/enum protoreflect.Value specified by +// the given FieldDescriptor. 
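+// For the integer and floating-point kinds, a JSON number may appear
+// either as a number token or as a quoted string; the helpers below
+// accept both forms.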
+func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + const b32 int = 32 + const b64 int = 64 + + tok, err := d.Read() + if err != nil { + return protoreflect.Value{}, err + } + + kind := fd.Kind() + switch kind { + case protoreflect.BoolKind: + if tok.Kind() == json.Bool { + return protoreflect.ValueOfBool(tok.Bool()), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if v, ok := unmarshalInt(tok, b32); ok { + return v, nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if v, ok := unmarshalInt(tok, b64); ok { + return v, nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if v, ok := unmarshalUint(tok, b32); ok { + return v, nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if v, ok := unmarshalUint(tok, b64); ok { + return v, nil + } + + case protoreflect.FloatKind: + if v, ok := unmarshalFloat(tok, b32); ok { + return v, nil + } + + case protoreflect.DoubleKind: + if v, ok := unmarshalFloat(tok, b64); ok { + return v, nil + } + + case protoreflect.StringKind: + if tok.Kind() == json.String { + return protoreflect.ValueOfString(tok.ParsedString()), nil + } + + case protoreflect.BytesKind: + if v, ok := unmarshalBytes(tok); ok { + return v, nil + } + + case protoreflect.EnumKind: + if v, ok := unmarshalEnum(tok, fd); ok { + return v, nil + } + + default: + panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind)) + } + + return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getInt(tok, bitSize) + + case json.String: + // Decode number from string. + s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getInt(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getInt(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Int(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfInt32(int32(n)), true + } + return protoreflect.ValueOfInt64(n), true +} + +func unmarshalUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getUint(tok, bitSize) + + case json.String: + // Decode number from string. 
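+ // Whitespace inside the quoted number is rejected: TrimSpace is used
+ // only to detect it, via the length comparison below.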
+ s := strings.TrimSpace(tok.ParsedString()) + if len(s) != len(tok.ParsedString()) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getUint(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getUint(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Uint(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfUint32(uint32(n)), true + } + return protoreflect.ValueOfUint64(n), true +} + +func unmarshalFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.Number: + return getFloat(tok, bitSize) + + case json.String: + s := tok.ParsedString() + switch s { + case "NaN": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.NaN())), true + } + return protoreflect.ValueOfFloat64(math.NaN()), true + case "Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(+1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(+1)), true + case "-Infinity": + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(math.Inf(-1))), true + } + return protoreflect.ValueOfFloat64(math.Inf(-1)), true + } + + // Decode number from string. + if len(s) != len(strings.TrimSpace(s)) { + return protoreflect.Value{}, false + } + dec := json.NewDecoder([]byte(s)) + tok, err := dec.Read() + if err != nil { + return protoreflect.Value{}, false + } + return getFloat(tok, bitSize) + } + return protoreflect.Value{}, false +} + +func getFloat(tok json.Token, bitSize int) (protoreflect.Value, bool) { + n, ok := tok.Float(bitSize) + if !ok { + return protoreflect.Value{}, false + } + if bitSize == 32 { + return protoreflect.ValueOfFloat32(float32(n)), true + } + return protoreflect.ValueOfFloat64(n), true +} + +func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) { + if tok.Kind() != json.String { + return protoreflect.Value{}, false + } + + s := tok.ParsedString() + enc := base64.StdEncoding + if strings.ContainsAny(s, "-_") { + enc = base64.URLEncoding + } + if len(s)%4 != 0 { + enc = enc.WithPadding(base64.NoPadding) + } + b, err := enc.DecodeString(s) + if err != nil { + return protoreflect.Value{}, false + } + return protoreflect.ValueOfBytes(b), true +} + +func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) { + switch tok.Kind() { + case json.String: + // Lookup EnumNumber based on name. + s := tok.ParsedString() + if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil { + return protoreflect.ValueOfEnum(enumVal.Number()), true + } + + case json.Number: + if n, ok := tok.Int(32); ok { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(n)), true + } + + case json.Null: + // This is only valid for google.protobuf.NullValue. 
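+ // Any other enum kind falls through to the failure return below.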
+ if isNullValue(fd) { + return protoreflect.ValueOfEnum(0), true + } + } + + return protoreflect.Value{}, false +} + +func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ArrayOpen { + return d.unexpectedTokenError(tok) + } + + switch fd.Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val := list.NewElement() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return err + } + list.Append(val) + } + default: + for { + tok, err := d.Peek() + if err != nil { + return err + } + + if tok.Kind() == json.ArrayClose { + d.Read() + return nil + } + + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(val) + } + } + + return nil +} + +func (d decoder) unmarshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside the for loop + // below. + var unmarshalMapValue func() (protoreflect.Value, error) + switch fd.MapValue().Kind() { + case protoreflect.MessageKind, protoreflect.GroupKind: + unmarshalMapValue = func() (protoreflect.Value, error) { + val := mmap.NewValue() + if err := d.unmarshalMessage(val.Message(), false); err != nil { + return protoreflect.Value{}, err + } + return val, nil + } + default: + unmarshalMapValue = func() (protoreflect.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + default: + return d.unexpectedTokenError(tok) + case json.ObjectClose: + break Loop + case json.Name: + // Continue. + } + + // Unmarshal field name. + pkey, err := d.unmarshalMapKey(tok, fd.MapKey()) + if err != nil { + return err + } + + // Check for duplicate field name. + if mmap.Has(pkey) { + return d.newError(tok.Pos(), "duplicate map key %v", tok.RawString()) + } + + // Read and unmarshal field value. + pval, err := unmarshalMapValue() + if err != nil { + return err + } + + mmap.Set(pkey, pval) + } + + return nil +} + +// unmarshalMapKey converts given token of Name kind into a protoreflect.MapKey. +// A map key type is any integral or string type. 
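+// Boolean keys are also accepted via their string forms "true" and
+// "false", as handled below.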
+func (d decoder) unmarshalMapKey(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.MapKey, error) { + const b32 = 32 + const b64 = 64 + const base10 = 10 + + name := tok.Name() + kind := fd.Kind() + switch kind { + case protoreflect.StringKind: + return protoreflect.ValueOfString(name).MapKey(), nil + + case protoreflect.BoolKind: + switch name { + case "true": + return protoreflect.ValueOfBool(true).MapKey(), nil + case "false": + return protoreflect.ValueOfBool(false).MapKey(), nil + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if n, err := strconv.ParseInt(name, base10, b32); err == nil { + return protoreflect.ValueOfInt32(int32(n)).MapKey(), nil + } + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if n, err := strconv.ParseInt(name, base10, b64); err == nil { + return protoreflect.ValueOfInt64(int64(n)).MapKey(), nil + } + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if n, err := strconv.ParseUint(name, base10, b32); err == nil { + return protoreflect.ValueOfUint32(uint32(n)).MapKey(), nil + } + + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if n, err := strconv.ParseUint(name, base10, b64); err == nil { + return protoreflect.ValueOfUint64(uint64(n)).MapKey(), nil + } + + default: + panic(fmt.Sprintf("invalid kind for map key: %v", kind)) + } + + return protoreflect.MapKey{}, d.newError(tok.Pos(), "invalid value for %v key: %s", kind, tok.RawString()) +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go new file mode 100644 index 0000000000..21d5d2cb18 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protojson marshals and unmarshals protocol buffer messages as JSON +// format. It follows the guide at +// https://protobuf.dev/programming-guides/proto3#json. +// +// This package produces a different output than the standard "encoding/json" +// package, which does not operate correctly on protocol buffer messages. +package protojson diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go new file mode 100644 index 0000000000..66b95870e9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go @@ -0,0 +1,349 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "encoding/base64" + "fmt" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/order" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. 
+// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given proto.Message in JSON format using default options. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable JSON format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. + Indent string + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // UseProtoNames uses proto field name instead of lowerCamelCase name in JSON + // field names. + UseProtoNames bool + + // UseEnumNumbers emits enum values as numbers. + UseEnumNumbers bool + + // EmitUnpopulated specifies whether to emit unpopulated fields. It does not + // emit unpopulated oneof fields or unpopulated extension fields. + // The JSON value emitted for unpopulated fields are as follows: + // ╔═══════╤════════════════════════════╗ + // ║ JSON │ Protobuf field ║ + // ╠═══════╪════════════════════════════╣ + // ║ false │ proto3 boolean fields ║ + // ║ 0 │ proto3 numeric fields ║ + // ║ "" │ proto3 string/bytes fields ║ + // ║ null │ proto2 scalar fields ║ + // ║ null │ message fields ║ + // ║ [] │ list fields ║ + // ║ {} │ map fields ║ + // ╚═══════╧════════════════════════════╝ + EmitUnpopulated bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.AllowPartial = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal marshals the given proto.Message in the JSON format using options in +// MarshalOptions. Do not depend on the output being stable. It may change over +// time across different versions of the program. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + return o.marshal(nil, m) +} + +// MarshalAppend appends the JSON format encoding of m to b, +// returning the result. 
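+//
+// For example (illustrative), for some existing proto.Message msg:
+//
+//	buf := make([]byte, 0, 1024)
+//	buf, err := MarshalOptions{}.MarshalAppend(buf, msg)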
+func (o MarshalOptions) MarshalAppend(b []byte, m proto.Message) ([]byte, error) { + return o.marshal(b, m) +} + +// marshal is a centralized function that all marshal operations go through. +// For profiling purposes, avoid changing the name of this function or +// introducing other code paths for marshal that do not go through this. +func (o MarshalOptions) marshal(b []byte, m proto.Message) ([]byte, error) { + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := json.NewEncoder(b, o.Indent) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case the output in an empty JSON object. + if m == nil { + return append(b, '{', '}'), nil + } + + enc := encoder{internalEnc, o} + if err := enc.marshalMessage(m.ProtoReflect(), ""); err != nil { + return nil, err + } + if o.AllowPartial { + return enc.Bytes(), nil + } + return enc.Bytes(), proto.CheckInitialized(m) +} + +type encoder struct { + *json.Encoder + opts MarshalOptions +} + +// typeFieldDesc is a synthetic field descriptor used for the "@type" field. +var typeFieldDesc = func() protoreflect.FieldDescriptor { + var fd filedesc.Field + fd.L0.FullName = "@type" + fd.L0.Index = -1 + fd.L1.Cardinality = protoreflect.Optional + fd.L1.Kind = protoreflect.StringKind + return &fd +}() + +// typeURLFieldRanger wraps a protoreflect.Message and modifies its Range method +// to additionally iterate over a synthetic field for the type URL. +type typeURLFieldRanger struct { + order.FieldRanger + typeURL string +} + +func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !f(typeFieldDesc, protoreflect.ValueOfString(m.typeURL)) { + return + } + m.FieldRanger.Range(f) +} + +// unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range +// method to additionally iterate over unpopulated fields. +type unpopulatedFieldRanger struct{ protoreflect.Message } + +func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if m.Has(fd) || fd.ContainingOneof() != nil { + continue // ignore populated fields and fields within a oneofs + } + + v := m.Get(fd) + isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid() + isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil + if isProto2Scalar || isSingularMessage { + v = protoreflect.Value{} // use invalid value to emit null + } + if !f(fd, v) { + return + } + } + m.Message.Range(f) +} + +// marshalMessage marshals the fields in the given protoreflect.Message. +// If the typeURL is non-empty, then a synthetic "@type" field is injected +// containing the URL as the value. 
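+// For example (illustrative), an Any wrapping a message Foo with a field
+// name set to "x" marshals as {"@type": "type.googleapis.com/pkg.Foo", "name": "x"}.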
+func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error { + if !flags.ProtoLegacy && messageset.IsMessageSet(m.Descriptor()) { + return errors.New("no support for proto1 MessageSets") + } + + if marshal := wellKnownTypeMarshaler(m.Descriptor().FullName()); marshal != nil { + return marshal(e, m) + } + + e.StartObject() + defer e.EndObject() + + var fields order.FieldRanger = m + if e.opts.EmitUnpopulated { + fields = unpopulatedFieldRanger{m} + } + if typeURL != "" { + fields = typeURLFieldRanger{fields, typeURL} + } + + var err error + order.RangeFields(fields, order.IndexNameFieldOrder, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + name := fd.JSONName() + if e.opts.UseProtoNames { + name = fd.TextName() + } + + if err = e.WriteName(name); err != nil { + return false + } + if err = e.marshalValue(v, fd); err != nil { + return false + } + return true + }) + return err +} + +// marshalValue marshals the given protoreflect.Value. +func (e encoder) marshalValue(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(val.List(), fd) + case fd.IsMap(): + return e.marshalMap(val.Map(), fd) + default: + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val protoreflect.Value, fd protoreflect.FieldDescriptor) error { + if !val.IsValid() { + e.WriteNull() + return nil + } + + switch kind := fd.Kind(); kind { + case protoreflect.BoolKind: + e.WriteBool(val.Bool()) + + case protoreflect.StringKind: + if e.WriteString(val.String()) != nil { + return errors.InvalidUTF8(string(fd.FullName())) + } + + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + e.WriteInt(val.Int()) + + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + e.WriteUint(val.Uint()) + + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Uint64Kind, + protoreflect.Sfixed64Kind, protoreflect.Fixed64Kind: + // 64-bit integers are written out as JSON string. + e.WriteString(val.String()) + + case protoreflect.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case protoreflect.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + + case protoreflect.BytesKind: + e.WriteString(base64.StdEncoding.EncodeToString(val.Bytes())) + + case protoreflect.EnumKind: + if fd.Enum().FullName() == genid.NullValue_enum_fullname { + e.WriteNull() + } else { + desc := fd.Enum().Values().ByNumber(val.Enum()) + if e.opts.UseEnumNumbers || desc == nil { + e.WriteInt(int64(val.Enum())) + } else { + e.WriteString(string(desc.Name())) + } + } + + case protoreflect.MessageKind, protoreflect.GroupKind: + if err := e.marshalMessage(val.Message(), ""); err != nil { + return err + } + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List. +func (e encoder) marshalList(list protoreflect.List, fd protoreflect.FieldDescriptor) error { + e.StartArray() + defer e.EndArray() + + for i := 0; i < list.Len(); i++ { + item := list.Get(i) + if err := e.marshalSingular(item, fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals given protoreflect.Map. 
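+// Entries are ranged in order.GenericKeyOrder so that the output is
+// deterministic for a given message.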
+func (e encoder) marshalMap(mmap protoreflect.Map, fd protoreflect.FieldDescriptor) error { + e.StartObject() + defer e.EndObject() + + var err error + order.RangeEntries(mmap, order.GenericKeyOrder, func(k protoreflect.MapKey, v protoreflect.Value) bool { + if err = e.WriteName(k.String()); err != nil { + return false + } + if err = e.marshalSingular(v, fd.MapValue()); err != nil { + return false + } + return true + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go new file mode 100644 index 0000000000..6c37d41744 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go @@ -0,0 +1,895 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protojson + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + "time" + + "google.golang.org/protobuf/internal/encoding/json" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/genid" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type marshalFunc func(encoder, protoreflect.Message) error + +// wellKnownTypeMarshaler returns a marshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. +func wellKnownTypeMarshaler(name protoreflect.FullName) marshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return encoder.marshalAny + case genid.Timestamp_message_name: + return encoder.marshalTimestamp + case genid.Duration_message_name: + return encoder.marshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return encoder.marshalWrapperType + case genid.Struct_message_name: + return encoder.marshalStruct + case genid.ListValue_message_name: + return encoder.marshalListValue + case genid.Value_message_name: + return encoder.marshalKnownValue + case genid.FieldMask_message_name: + return encoder.marshalFieldMask + case genid.Empty_message_name: + return encoder.marshalEmpty + } + } + return nil +} + +type unmarshalFunc func(decoder, protoreflect.Message) error + +// wellKnownTypeUnmarshaler returns a unmarshal function if the message type +// has specialized serialization behavior. It returns nil otherwise. 
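+// The set of types mirrors wellKnownTypeMarshaler above: Any, Timestamp,
+// Duration, the wrapper types, Struct, ListValue, Value, FieldMask and
+// Empty.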
+func wellKnownTypeUnmarshaler(name protoreflect.FullName) unmarshalFunc { + if name.Parent() == genid.GoogleProtobuf_package { + switch name.Name() { + case genid.Any_message_name: + return decoder.unmarshalAny + case genid.Timestamp_message_name: + return decoder.unmarshalTimestamp + case genid.Duration_message_name: + return decoder.unmarshalDuration + case genid.BoolValue_message_name, + genid.Int32Value_message_name, + genid.Int64Value_message_name, + genid.UInt32Value_message_name, + genid.UInt64Value_message_name, + genid.FloatValue_message_name, + genid.DoubleValue_message_name, + genid.StringValue_message_name, + genid.BytesValue_message_name: + return decoder.unmarshalWrapperType + case genid.Struct_message_name: + return decoder.unmarshalStruct + case genid.ListValue_message_name: + return decoder.unmarshalListValue + case genid.Value_message_name: + return decoder.unmarshalKnownValue + case genid.FieldMask_message_name: + return decoder.unmarshalFieldMask + case genid.Empty_message_name: + return decoder.unmarshalEmpty + } + } + return nil +} + +// The JSON representation of an Any message uses the regular representation of +// the deserialized, embedded message, with an additional field `@type` which +// contains the type URL. If the embedded message type is well-known and has a +// custom JSON representation, that representation will be embedded adding a +// field `value` which holds the custom JSON in addition to the `@type` field. + +func (e encoder) marshalAny(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + if !m.Has(fdType) { + if !m.Has(fdValue) { + // If message is empty, marshal out empty JSON object. + e.StartObject() + e.EndObject() + return nil + } else { + // Return error if type_url field is not set, but value is set. + return errors.New("%s: %v is not set", genid.Any_message_fullname, genid.Any_TypeUrl_field_name) + } + } + + typeVal := m.Get(fdType) + valueVal := m.Get(fdValue) + + // Resolve the type in order to unmarshal value field. + typeURL := typeVal.String() + emt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return errors.New("%s: unable to resolve %q: %v", genid.Any_message_fullname, typeURL, err) + } + + em := emt.New() + err = proto.UnmarshalOptions{ + AllowPartial: true, // never check required fields inside an Any + Resolver: e.opts.Resolver, + }.Unmarshal(valueVal.Bytes(), em.Interface()) + if err != nil { + return errors.New("%s: unable to unmarshal %q: %v", genid.Any_message_fullname, typeURL, err) + } + + // If type of value has custom JSON encoding, marshal out a field "value" + // with corresponding custom JSON encoding of the embedded message as a + // field. + if marshal := wellKnownTypeMarshaler(emt.Descriptor().FullName()); marshal != nil { + e.StartObject() + defer e.EndObject() + + // Marshal out @type field. + e.WriteName("@type") + if err := e.WriteString(typeURL); err != nil { + return err + } + + e.WriteName("value") + return marshal(e, em) + } + + // Else, marshal out the embedded message's fields in this Any object. + if err := e.marshalMessage(em, typeURL); err != nil { + return err + } + + return nil +} + +func (d decoder) unmarshalAny(m protoreflect.Message) error { + // Peek to check for json.ObjectOpen to avoid advancing a read. 
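+ // start.Pos() is retained for error reporting after the embedded
+ // message has been processed.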
+ start, err := d.Peek() + if err != nil { + return err + } + if start.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(start) + } + + // Use another decoder to parse the unread bytes for @type field. This + // avoids advancing a read from current decoder because the current JSON + // object may contain the fields of the embedded type. + dec := decoder{d.Clone(), UnmarshalOptions{}} + tok, err := findTypeURL(dec) + switch err { + case errEmptyObject: + // An empty JSON object translates to an empty Any message. + d.Read() // Read json.ObjectOpen. + d.Read() // Read json.ObjectClose. + return nil + + case errMissingType: + if d.opts.DiscardUnknown { + // Treat all fields as unknowns, similar to an empty object. + return d.skipJSONValue() + } + // Use start.Pos() for line position. + return d.newError(start.Pos(), err.Error()) + + default: + if err != nil { + return err + } + } + + typeURL := tok.ParsedString() + emt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return d.newError(tok.Pos(), "unable to resolve %v: %q", tok.RawString(), err) + } + + // Create new message for the embedded message type and unmarshal into it. + em := emt.New() + if unmarshal := wellKnownTypeUnmarshaler(emt.Descriptor().FullName()); unmarshal != nil { + // If embedded message is a custom type, + // unmarshal the JSON "value" field into it. + if err := d.unmarshalAnyValue(unmarshal, em); err != nil { + return err + } + } else { + // Else unmarshal the current JSON object into it. + if err := d.unmarshalMessage(em, true); err != nil { + return err + } + } + // Serialize the embedded message and assign the resulting bytes to the + // proto value field. + b, err := proto.MarshalOptions{ + AllowPartial: true, // No need to check required fields inside an Any. + Deterministic: true, + }.Marshal(em.Interface()) + if err != nil { + return d.newError(start.Pos(), "error in marshaling Any.value field: %v", err) + } + + fds := m.Descriptor().Fields() + fdType := fds.ByNumber(genid.Any_TypeUrl_field_number) + fdValue := fds.ByNumber(genid.Any_Value_field_number) + + m.Set(fdType, protoreflect.ValueOfString(typeURL)) + m.Set(fdValue, protoreflect.ValueOfBytes(b)) + return nil +} + +var errEmptyObject = fmt.Errorf(`empty object`) +var errMissingType = fmt.Errorf(`missing "@type" field`) + +// findTypeURL returns the token for the "@type" field value from the given +// JSON bytes. It is expected that the given bytes start with json.ObjectOpen. +// It returns errEmptyObject if the JSON object is empty or errMissingType if +// @type field does not exist. It returns other error if the @type field is not +// valid or other decoding issues. +func findTypeURL(d decoder) (json.Token, error) { + var typeURL string + var typeTok json.Token + numFields := 0 + // Skip start object. + d.Read() + +Loop: + for { + tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + + switch tok.Kind() { + case json.ObjectClose: + if typeURL == "" { + // Did not find @type field. + if numFields > 0 { + return json.Token{}, errMissingType + } + return json.Token{}, errEmptyObject + } + break Loop + + case json.Name: + numFields++ + if tok.Name() != "@type" { + // Skip value. + if err := d.skipJSONValue(); err != nil { + return json.Token{}, err + } + continue + } + + // Return error if this was previously set already. + if typeURL != "" { + return json.Token{}, d.newError(tok.Pos(), `duplicate "@type" field`) + } + // Read field value. 
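+ // The @type value must be a non-empty JSON string; anything else is
+ // rejected below.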
+ tok, err := d.Read() + if err != nil { + return json.Token{}, err + } + if tok.Kind() != json.String { + return json.Token{}, d.newError(tok.Pos(), `@type field value is not a string: %v`, tok.RawString()) + } + typeURL = tok.ParsedString() + if typeURL == "" { + return json.Token{}, d.newError(tok.Pos(), `@type field contains empty value`) + } + typeTok = tok + } + } + + return typeTok, nil +} + +// skipJSONValue parses a JSON value (null, boolean, string, number, object and +// array) in order to advance the read to the next JSON value. It relies on +// the decoder returning an error if the types are not in valid sequence. +func (d decoder) skipJSONValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for objects and arrays. + switch tok.Kind() { + case json.ObjectOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + case json.Name: + // Skip object field value. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + + case json.ArrayOpen: + for { + tok, err := d.Peek() + if err != nil { + return err + } + switch tok.Kind() { + case json.ArrayClose: + d.Read() + return nil + default: + // Skip array item. + if err := d.skipJSONValue(); err != nil { + return err + } + } + } + } + return nil +} + +// unmarshalAnyValue unmarshals the given custom-type message from the JSON +// object's "value" field. +func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Message) error { + // Skip ObjectOpen, and start reading the fields. + d.Read() + + var found bool // Used for detecting duplicate "value". + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + if !found { + return d.newError(tok.Pos(), `missing "value" field`) + } + return nil + + case json.Name: + switch tok.Name() { + case "@type": + // Skip the value as this was previously parsed already. + d.Read() + + case "value": + if found { + return d.newError(tok.Pos(), `duplicate "value" field`) + } + // Unmarshal the field value into the given message. + if err := unmarshal(d, m); err != nil { + return err + } + found = true + + default: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + } + } + } +} + +// Wrapper types are encoded as JSON primitives like string, number or boolean. + +func (e encoder) marshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val := m.Get(fd) + return e.marshalSingular(val, fd) +} + +func (d decoder) unmarshalWrapperType(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.WrapperValue_Value_field_number) + val, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + m.Set(fd, val) + return nil +} + +// The JSON representation for Empty is an empty JSON object. 
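+// For example (illustrative), marshaling google.protobuf.Empty always
+// produces {}, and unmarshaling accepts only {} unless DiscardUnknown
+// is set.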
+ +func (e encoder) marshalEmpty(protoreflect.Message) error { + e.StartObject() + e.EndObject() + return nil +} + +func (d decoder) unmarshalEmpty(protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.ObjectOpen { + return d.unexpectedTokenError(tok) + } + + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case json.ObjectClose: + return nil + + case json.Name: + if d.opts.DiscardUnknown { + if err := d.skipJSONValue(); err != nil { + return err + } + continue + } + return d.newError(tok.Pos(), "unknown field %v", tok.RawString()) + + default: + return d.unexpectedTokenError(tok) + } + } +} + +// The JSON representation for Struct is a JSON object that contains the encoded +// Struct.fields map and follows the serialization rules for a map. + +func (e encoder) marshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return e.marshalMap(m.Get(fd).Map(), fd) +} + +func (d decoder) unmarshalStruct(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.Struct_Fields_field_number) + return d.unmarshalMap(m.Mutable(fd).Map(), fd) +} + +// The JSON representation for ListValue is JSON array that contains the encoded +// ListValue.values repeated field and follows the serialization rules for a +// repeated field. + +func (e encoder) marshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return e.marshalList(m.Get(fd).List(), fd) +} + +func (d decoder) unmarshalListValue(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.ListValue_Values_field_number) + return d.unmarshalList(m.Mutable(fd).List(), fd) +} + +// The JSON representation for a Value is dependent on the oneof field that is +// set. Each of the field in the oneof has its own custom serialization rule. A +// Value message needs to be a oneof field set, else it is an error. 
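+// For example (illustrative): number_value 3.5 encodes as 3.5,
+// string_value "abc" as "abc", null_value as null, and struct_value or
+// list_value as the corresponding JSON object or array.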
+ +func (e encoder) marshalKnownValue(m protoreflect.Message) error { + od := m.Descriptor().Oneofs().ByName(genid.Value_Kind_oneof_name) + fd := m.WhichOneof(od) + if fd == nil { + return errors.New("%s: none of the oneof fields is set", genid.Value_message_fullname) + } + if fd.Number() == genid.Value_NumberValue_field_number { + if v := m.Get(fd).Float(); math.IsNaN(v) || math.IsInf(v, 0) { + return errors.New("%s: invalid %v value", genid.Value_NumberValue_field_fullname, v) + } + } + return e.marshalSingular(m.Get(fd), fd) +} + +func (d decoder) unmarshalKnownValue(m protoreflect.Message) error { + tok, err := d.Peek() + if err != nil { + return err + } + + var fd protoreflect.FieldDescriptor + var val protoreflect.Value + switch tok.Kind() { + case json.Null: + d.Read() + fd = m.Descriptor().Fields().ByNumber(genid.Value_NullValue_field_number) + val = protoreflect.ValueOfEnum(0) + + case json.Bool: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_BoolValue_field_number) + val = protoreflect.ValueOfBool(tok.Bool()) + + case json.Number: + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_NumberValue_field_number) + var ok bool + val, ok = unmarshalFloat(tok, 64) + if !ok { + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + case json.String: + // A JSON string may have been encoded from the number_value field, + // e.g. "NaN", "Infinity", etc. Parsing a proto double type also allows + // for it to be in JSON string form. Given this custom encoding spec, + // however, there is no way to identify that and hence a JSON string is + // always assigned to the string_value field, which means that certain + // encoding cannot be parsed back to the same field. + tok, err := d.Read() + if err != nil { + return err + } + fd = m.Descriptor().Fields().ByNumber(genid.Value_StringValue_field_number) + val = protoreflect.ValueOfString(tok.ParsedString()) + + case json.ObjectOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_StructValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalStruct(val.Message()); err != nil { + return err + } + + case json.ArrayOpen: + fd = m.Descriptor().Fields().ByNumber(genid.Value_ListValue_field_number) + val = m.NewField(fd) + if err := d.unmarshalListValue(val.Message()); err != nil { + return err + } + + default: + return d.newError(tok.Pos(), "invalid %v: %v", genid.Value_message_fullname, tok.RawString()) + } + + m.Set(fd, val) + return nil +} + +// The JSON representation for a Duration is a JSON string that ends in the +// suffix "s" (indicating seconds) and is preceded by the number of seconds, +// with nanoseconds expressed as fractional seconds. +// +// Durations less than one second are represented with a 0 seconds field and a +// positive or negative nanos field. For durations of one second or more, a +// non-zero value for the nanos field must be of the same sign as the seconds +// field. +// +// Duration.seconds must be from -315,576,000,000 to +315,576,000,000 inclusive. +// Duration.nanos must be from -999,999,999 to +999,999,999 inclusive. 
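+//
+// For example (illustrative): {seconds: 3, nanos: 500000000} encodes as
+// "3.500s", and "-0.100s" decodes to {seconds: 0, nanos: -100000000}.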
+ +const ( + secondsInNanos = 999999999 + maxSecondsInDuration = 315576000000 +) + +func (e encoder) marshalDuration(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return errors.New("%s: seconds out of range %v", genid.Duration_message_fullname, secs) + } + if nanos < -secondsInNanos || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Duration_message_fullname, nanos) + } + if (secs > 0 && nanos < 0) || (secs < 0 && nanos > 0) { + return errors.New("%s: signs of seconds and nanos do not match", genid.Duration_message_fullname) + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + var sign string + if secs < 0 || nanos < 0 { + sign, secs, nanos = "-", -1*secs, -1*nanos + } + x := fmt.Sprintf("%s%d.%09d", sign, secs, nanos) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "s") + return nil +} + +func (d decoder) unmarshalDuration(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + secs, nanos, ok := parseDuration(tok.ParsedString()) + if !ok { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Duration_message_fullname, tok.RawString()) + } + // Validate seconds. No need to validate nanos because parseDuration would + // have covered that already. + if secs < -maxSecondsInDuration || secs > maxSecondsInDuration { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Duration_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Duration_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Duration_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(nanos)) + return nil +} + +// parseDuration parses the given input string for seconds and nanoseconds value +// for the Duration JSON format. The format is a decimal number with a suffix +// 's'. It can have optional plus/minus sign. There needs to be at least an +// integer or fractional part. Fractional part is limited to 9 digits only for +// nanoseconds precision, regardless of whether there are trailing zero digits. +// Example values are 1s, 0.1s, 1.s, .1s, +1s, -1s, -.1s. +func parseDuration(input string) (int64, int32, bool) { + b := []byte(input) + size := len(b) + if size < 2 { + return 0, 0, false + } + if b[size-1] != 's' { + return 0, 0, false + } + b = b[:size-1] + + // Read optional plus/minus symbol. + var neg bool + switch b[0] { + case '-': + neg = true + b = b[1:] + case '+': + b = b[1:] + } + if len(b) == 0 { + return 0, 0, false + } + + // Read the integer part. + var intp []byte + switch { + case b[0] == '0': + b = b[1:] + + case '1' <= b[0] && b[0] <= '9': + intp = b[0:] + b = b[1:] + n := 1 + for len(b) > 0 && '0' <= b[0] && b[0] <= '9' { + n++ + b = b[1:] + } + intp = intp[:n] + + case b[0] == '.': + // Continue below. + + default: + return 0, 0, false + } + + hasFrac := false + var frac [9]byte + if len(b) > 0 { + if b[0] != '.' 
{ + return 0, 0, false + } + // Read the fractional part. + b = b[1:] + n := 0 + for len(b) > 0 && n < 9 && '0' <= b[0] && b[0] <= '9' { + frac[n] = b[0] + n++ + b = b[1:] + } + // It is not valid if there are more bytes left. + if len(b) > 0 { + return 0, 0, false + } + // Pad fractional part with 0s. + for i := n; i < 9; i++ { + frac[i] = '0' + } + hasFrac = true + } + + var secs int64 + if len(intp) > 0 { + var err error + secs, err = strconv.ParseInt(string(intp), 10, 64) + if err != nil { + return 0, 0, false + } + } + + var nanos int64 + if hasFrac { + nanob := bytes.TrimLeft(frac[:], "0") + if len(nanob) > 0 { + var err error + nanos, err = strconv.ParseInt(string(nanob), 10, 32) + if err != nil { + return 0, 0, false + } + } + } + + if neg { + if secs > 0 { + secs = -secs + } + if nanos > 0 { + nanos = -nanos + } + } + return secs, int32(nanos), true +} + +// The JSON representation for a Timestamp is a JSON string in the RFC 3339 +// format, i.e. "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" where +// {year} is always expressed using four digits while {month}, {day}, {hour}, +// {min}, and {sec} are zero-padded to two digits each. The fractional seconds, +// which can go up to 9 digits, up to 1 nanosecond resolution, is optional. The +// "Z" suffix indicates the timezone ("UTC"); the timezone is required. Encoding +// should always use UTC (as indicated by "Z") and a decoder should be able to +// accept both UTC and other timezones (as indicated by an offset). +// +// Timestamp.seconds must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z +// inclusive. +// Timestamp.nanos must be from 0 to 999,999,999 inclusive. + +const ( + maxTimestampSeconds = 253402300799 + minTimestampSeconds = -62135596800 +) + +func (e encoder) marshalTimestamp(m protoreflect.Message) error { + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + secsVal := m.Get(fdSeconds) + nanosVal := m.Get(fdNanos) + secs := secsVal.Int() + nanos := nanosVal.Int() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return errors.New("%s: seconds out of range %v", genid.Timestamp_message_fullname, secs) + } + if nanos < 0 || nanos > secondsInNanos { + return errors.New("%s: nanos out of range %v", genid.Timestamp_message_fullname, nanos) + } + // Uses RFC 3339, where generated output will be Z-normalized and uses 0, 3, + // 6 or 9 fractional digits. + t := time.Unix(secs, nanos).UTC() + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + e.WriteString(x + "Z") + return nil +} + +func (d decoder) unmarshalTimestamp(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + + s := tok.ParsedString() + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate seconds. + secs := t.Unix() + if secs < minTimestampSeconds || secs > maxTimestampSeconds { + return d.newError(tok.Pos(), "%v value out of range: %v", genid.Timestamp_message_fullname, tok.RawString()) + } + // Validate subseconds. 
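+ // time.Parse has already accepted the string, so this only rejects
+ // fractional seconds longer than 9 digits.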
+ i := strings.LastIndexByte(s, '.') // start of subsecond field + j := strings.LastIndexAny(s, "Z-+") // start of timezone field + if i >= 0 && j >= i && j-i > len(".999999999") { + return d.newError(tok.Pos(), "invalid %v value %v", genid.Timestamp_message_fullname, tok.RawString()) + } + + fds := m.Descriptor().Fields() + fdSeconds := fds.ByNumber(genid.Timestamp_Seconds_field_number) + fdNanos := fds.ByNumber(genid.Timestamp_Nanos_field_number) + + m.Set(fdSeconds, protoreflect.ValueOfInt64(secs)) + m.Set(fdNanos, protoreflect.ValueOfInt32(int32(t.Nanosecond()))) + return nil +} + +// The JSON representation for a FieldMask is a JSON string where paths are +// separated by a comma. Fields name in each path are converted to/from +// lower-camel naming conventions. Encoding should fail if the path name would +// end up differently after a round-trip. + +func (e encoder) marshalFieldMask(m protoreflect.Message) error { + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Get(fd).List() + paths := make([]string, 0, list.Len()) + + for i := 0; i < list.Len(); i++ { + s := list.Get(i).String() + if !protoreflect.FullName(s).IsValid() { + return errors.New("%s contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s) + } + // Return error if conversion to camelCase is not reversible. + cc := strs.JSONCamelCase(s) + if s != strs.JSONSnakeCase(cc) { + return errors.New("%s contains irreversible value %q", genid.FieldMask_Paths_field_fullname, s) + } + paths = append(paths, cc) + } + + e.WriteString(strings.Join(paths, ",")) + return nil +} + +func (d decoder) unmarshalFieldMask(m protoreflect.Message) error { + tok, err := d.Read() + if err != nil { + return err + } + if tok.Kind() != json.String { + return d.unexpectedTokenError(tok) + } + str := strings.TrimSpace(tok.ParsedString()) + if str == "" { + return nil + } + paths := strings.Split(str, ",") + + fd := m.Descriptor().Fields().ByNumber(genid.FieldMask_Paths_field_number) + list := m.Mutable(fd).List() + + for _, s0 := range paths { + s := strs.JSONSnakeCase(s0) + if strings.Contains(s0, "_") || !protoreflect.FullName(s).IsValid() { + return d.newError(tok.Pos(), "%v contains invalid path: %q", genid.FieldMask_Paths_field_fullname, s0) + } + list.Append(protoreflect.ValueOfString(s)) + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go new file mode 100644 index 0000000000..d043a6ebe0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go @@ -0,0 +1,340 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "io" + "regexp" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +const unexpectedFmt = "unexpected token %s" + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// Decoder is a token-based JSON decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. 
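+ // It is replayed, together with lastToken, when Read follows Peek.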
+ lastErr error + + // openStack is a stack containing ObjectOpen and ArrayOpen values. The + // top of stack represents the object or the array the current value is + // directly located in. + openStack []Kind + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// Peek looks ahead and returns the next token kind without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next JSON token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + const scalar = Null | Bool | Number | String + + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext() + if err != nil { + return Token{}, err + } + + switch tok.kind { + case EOF: + if len(d.openStack) != 0 || + d.lastToken.kind&scalar|ObjectClose|ArrayClose == 0 { + return Token{}, ErrUnexpectedEOF + } + + case Null: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case Bool, Number: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + + case String: + if d.isValueNext() { + break + } + // This string token should only be for a field name. + if d.lastToken.kind&(ObjectOpen|comma) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + if len(d.in) == 0 { + return Token{}, ErrUnexpectedEOF + } + if c := d.in[0]; c != ':' { + return Token{}, d.newSyntaxError(d.currPos(), `unexpected character %s, missing ":" after field name`, string(c)) + } + tok.kind = Name + d.consume(1) + + case ObjectOpen, ArrayOpen: + if !d.isValueNext() { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = append(d.openStack, tok.kind) + + case ObjectClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ObjectOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case ArrayClose: + if len(d.openStack) == 0 || + d.lastToken.kind == comma || + d.openStack[len(d.openStack)-1] != ArrayOpen { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + d.openStack = d.openStack[:len(d.openStack)-1] + + case comma: + if len(d.openStack) == 0 || + d.lastToken.kind&(scalar|ObjectClose|ArrayClose) == 0 { + return Token{}, d.newSyntaxError(tok.pos, unexpectedFmt, tok.RawString()) + } + } + + // Update d.lastToken only after validating token to be in the right sequence. + d.lastToken = tok + + if d.lastToken.kind == comma { + return d.Read() + } + return tok, nil +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9]{1,32}|.)`) + +// parseNext parses for the next JSON token. It returns a Token object for +// different types, except for Name. It does not handle whether the next token +// is in a valid sequence or not. +func (d *Decoder) parseNext() (Token, error) { + // Trim leading spaces. 
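+ // consume(0) advances past leading whitespace without consuming a
+ // token.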
+ d.consume(0) + + in := d.in + if len(in) == 0 { + return d.consumeToken(EOF, 0), nil + } + + switch in[0] { + case 'n': + if n := matchWithDelim("null", in); n != 0 { + return d.consumeToken(Null, n), nil + } + + case 't': + if n := matchWithDelim("true", in); n != 0 { + return d.consumeBoolToken(true, n), nil + } + + case 'f': + if n := matchWithDelim("false", in); n != 0 { + return d.consumeBoolToken(false, n), nil + } + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + if n, ok := parseNumber(in); ok { + return d.consumeToken(Number, n), nil + } + + case '"': + s, n, err := d.parseString(in) + if err != nil { + return Token{}, err + } + return d.consumeStringToken(s, n), nil + + case '{': + return d.consumeToken(ObjectOpen, 1), nil + + case '}': + return d.consumeToken(ObjectClose, 1), nil + + case '[': + return d.consumeToken(ArrayOpen, 1), nil + + case ']': + return d.consumeToken(ArrayClose, 1), nil + + case ',': + return d.consumeToken(comma, 1), nil + } + return Token{}, d.newSyntaxError(d.currPos(), "invalid value %s", errRegexp.Find(in)) +} + +// newSyntaxError returns an error with line and column information useful for +// syntax errors. +func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error { + e := errors.New(f, x...) + line, column := d.Position(pos) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +// currPos returns the current index position of d.in from d.orig. +func (d *Decoder) currPos() int { + return len(d.orig) - len(d.in) +} + +// matchWithDelim matches s with the input b and verifies that the match +// terminates with a delimiter of some form (e.g., r"[^-+_.a-zA-Z0-9]"). +// As a special case, EOF is considered a delimiter. It returns the length of s +// if there is a match, else 0. +func matchWithDelim(s string, b []byte) int { + if !bytes.HasPrefix(b, []byte(s)) { + return 0 + } + + n := len(s) + if n < len(b) && isNotDelim(b[n]) { + return 0 + } + return n +} + +// isNotDelim returns true if given byte is a not delimiter character. +func isNotDelim(c byte) bool { + return (c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} + +// consume consumes n bytes of input and any subsequent whitespace. +func (d *Decoder) consume(n int) { + d.in = d.in[n:] + for len(d.in) > 0 { + switch d.in[0] { + case ' ', '\n', '\r', '\t': + d.in = d.in[1:] + default: + return + } + } +} + +// isValueNext returns true if next type should be a JSON value: Null, +// Number, String or Bool. 
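+// Read also consults it before accepting ObjectOpen and ArrayOpen; the
+// decision is based on the innermost open bracket and the last token
+// read.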
+func (d *Decoder) isValueNext() bool { + if len(d.openStack) == 0 { + return d.lastToken.kind == 0 + } + + start := d.openStack[len(d.openStack)-1] + switch start { + case ObjectOpen: + return d.lastToken.kind&Name != 0 + case ArrayOpen: + return d.lastToken.kind&(ArrayOpen|comma) != 0 + } + panic(fmt.Sprintf( + "unreachable logic in Decoder.isValueNext, lastToken.kind: %v, openStack: %v", + d.lastToken.kind, start)) +} + +// consumeToken constructs a Token for given Kind with raw value derived from +// current d.in and given size, and consumes the given size-length of it. +func (d *Decoder) consumeToken(kind Kind, size int) Token { + tok := Token{ + kind: kind, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + } + d.consume(size) + return tok +} + +// consumeBoolToken constructs a Token for a Bool kind with raw value derived from +// current d.in and given size. +func (d *Decoder) consumeBoolToken(b bool, size int) Token { + tok := Token{ + kind: Bool, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + boo: b, + } + d.consume(size) + return tok +} + +// consumeStringToken constructs a Token for a String kind with raw value derived +// from current d.in and given size. +func (d *Decoder) consumeStringToken(s string, size int) Token { + tok := Token{ + kind: String, + raw: d.in[:size], + pos: len(d.orig) - len(d.in), + str: s, + } + d.consume(size) + return tok +} + +// Clone returns a copy of the Decoder for use in reading ahead the next JSON +// object, array or other values without affecting current Decoder. +func (d *Decoder) Clone() *Decoder { + ret := *d + ret.openStack = append([]Kind(nil), ret.openStack...) + return &ret +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go new file mode 100644 index 0000000000..2999d71332 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_number.go @@ -0,0 +1,254 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "strconv" +) + +// parseNumber reads the given []byte for a valid JSON number. If it is valid, +// it returns the number of bytes. Parsing logic follows the definition in +// https://tools.ietf.org/html/rfc7159#section-6, and is based off +// encoding/json.isValidNumber function. +func parseNumber(input []byte) (int, bool) { + var n int + + s := input + if len(s) == 0 { + return 0, false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + + // Digits + switch { + case s[0] == '0': + s = s[1:] + n++ + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + n++ + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + + default: + return 0, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + s = s[2:] + n += 2 + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + n++ + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return 0, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + } + + // Check that next byte is a delimiter or it is at the end. 
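+	// This rejects inputs such as "10e" or "1.0x", where a shorter prefix
+	// would otherwise parse as a number but the trailing byte is not a
+	// delimiter.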
+ if n < len(input) && isNotDelim(input[n]) { + return 0, false + } + + return n, true +} + +// numberParts is the result of parsing out a valid JSON number. It contains +// the parts of a number. The parts are used for integer conversion. +type numberParts struct { + neg bool + intp []byte + frac []byte + exp []byte +} + +// parseNumber constructs numberParts from given []byte. The logic here is +// similar to consumeNumber above with the difference of having to construct +// numberParts. The slice fields in numberParts are subslices of the input. +func parseNumberParts(input []byte) (numberParts, bool) { + var neg bool + var intp []byte + var frac []byte + var exp []byte + + s := input + if len(s) == 0 { + return numberParts{}, false + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + if len(s) == 0 { + return numberParts{}, false + } + } + + // Digits + switch { + case s[0] == '0': + // Skip first 0 and no need to store. + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + intp = s + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + intp = intp[:n] + + default: + return numberParts{}, false + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' { + frac = s[1:] + n := 1 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + frac = frac[:n] + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + exp = s + n := 0 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return numberParts{}, false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + exp = exp[:n] + } + + return numberParts{ + neg: neg, + intp: intp, + frac: bytes.TrimRight(frac, "0"), // Remove unnecessary 0s to the right. + exp: exp, + }, true +} + +// normalizeToIntString returns an integer string in normal form without the +// E-notation for given numberParts. It will return false if it is not an +// integer or if the exponent exceeds than max/min int value. +func normalizeToIntString(n numberParts) (string, bool) { + intpSize := len(n.intp) + fracSize := len(n.frac) + + if intpSize == 0 && fracSize == 0 { + return "0", true + } + + var exp int + if len(n.exp) > 0 { + i, err := strconv.ParseInt(string(n.exp), 10, 32) + if err != nil { + return "", false + } + exp = int(i) + } + + var num []byte + if exp >= 0 { + // For positive E, shift fraction digits into integer part and also pad + // with zeroes as needed. + + // If there are more digits in fraction than the E value, then the + // number is not an integer. + if fracSize > exp { + return "", false + } + + // Make sure resulting digits are within max value limit to avoid + // unnecessarily constructing a large byte slice that may simply fail + // later on. + const maxDigits = 20 // Max uint64 value has 20 decimal digits. + if intpSize+exp > maxDigits { + return "", false + } + + // Set cap to make a copy of integer part when appended. + num = n.intp[:len(n.intp):len(n.intp)] + num = append(num, n.frac...) + for i := 0; i < exp-fracSize; i++ { + num = append(num, '0') + } + } else { + // For negative E, shift digits in integer part out. + + // If there are fractions, then the number is not an integer. + if fracSize > 0 { + return "", false + } + + // index is where the decimal point will be after adjusting for negative + // exponent. 
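+		// For example, intp "1200" with exp -2 gives index 2; the shifted-out
+		// digits "00" are all zero, so the value is still integral and "12" is
+		// returned.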
+ index := intpSize + exp + if index < 0 { + return "", false + } + + num = n.intp + // If any of the digits being shifted to the right of the decimal point + // is non-zero, then the number is not an integer. + for i := index; i < intpSize; i++ { + if num[i] != '0' { + return "", false + } + } + num = num[:index] + } + + if n.neg { + return "-" + string(num), true + } + return string(num), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go new file mode 100644 index 0000000000..f7fea7d8dd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_string.go @@ -0,0 +1,91 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +func (d *Decoder) parseString(in []byte) (string, int, error) { + in0 := in + if len(in) == 0 { + return "", 0, ErrUnexpectedEOF + } + if in[0] != '"' { + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q at start of string", in[0]) + } + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", 0, d.newSyntaxError(d.currPos(), "invalid UTF-8 in string") + case r < ' ': + return "", 0, d.newSyntaxError(d.currPos(), "invalid character %q in string", r) + case r == '"': + in = in[1:] + n := len(in0) - len(in) + return string(out), n, nil + case r == '\\': + if len(in) < 2 { + return "", 0, ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\\', '/': + in, out = in[2:], append(out, r) + case 'b': + in, out = in[2:], append(out, '\b') + case 'f': + in, out = in[2:], append(out, '\f') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'u': + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + if err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", 0, ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || + r == unicode.ReplacementChar || err != nil { + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", 0, d.newSyntaxError(d.currPos(), "invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", 0, ErrUnexpectedEOF +} + +// indexNeedEscapeInBytes returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. 
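+// It reuses indexNeedEscapeInString by viewing b as a string via
+// strs.UnsafeString (a zero-copy conversion where unsafe is available).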
+func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go new file mode 100644 index 0000000000..50578d6593 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode_token.go @@ -0,0 +1,192 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "bytes" + "fmt" + "strconv" +) + +// Kind represents a token kind expressible in the JSON format. +type Kind uint16 + +const ( + Invalid Kind = (1 << iota) / 2 + EOF + Null + Bool + Number + String + Name + ObjectOpen + ObjectClose + ArrayOpen + ArrayClose + + // comma is only for parsing in between tokens and + // does not need to be exported. + comma +) + +func (k Kind) String() string { + switch k { + case EOF: + return "eof" + case Null: + return "null" + case Bool: + return "bool" + case Number: + return "number" + case String: + return "string" + case ObjectOpen: + return "{" + case ObjectClose: + return "}" + case Name: + return "name" + case ArrayOpen: + return "[" + case ArrayClose: + return "]" + case comma: + return "," + } + return "" +} + +// Token provides a parsed token kind and value. +// +// Values are provided by the difference accessor methods. The accessor methods +// Name, Bool, and ParsedString will panic if called on the wrong kind. There +// are different accessor methods for the Number kind for converting to the +// appropriate Go numeric type and those methods have the ok return value. +type Token struct { + // Token kind. + kind Kind + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // boo is parsed boolean value. + boo bool + // str is parsed string value. + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// Name returns the object name if token is Name, else it panics. +func (t Token) Name() string { + if t.kind == Name { + return t.str + } + panic(fmt.Sprintf("Token is not a Name: %v", t.RawString())) +} + +// Bool returns the bool value if token kind is Bool, else it panics. +func (t Token) Bool() bool { + if t.kind == Bool { + return t.boo + } + panic(fmt.Sprintf("Token is not a Bool: %v", t.RawString())) +} + +// ParsedString returns the string value for a JSON string token or the read +// value in string if token is not a string. +func (t Token) ParsedString() string { + if t.kind == String { + return t.str + } + panic(fmt.Sprintf("Token is not a String: %v", t.RawString())) +} + +// Float returns the floating-point number if token kind is Number. +// +// The floating-point precision is specified by the bitSize parameter: 32 for +// float32 or 64 for float64. If bitSize=32, the result still has type float64, +// but it will be convertible to float32 without changing its value. It will +// return false if the number exceeds the floating point limits for given +// bitSize. 
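+//
+// For example, a Number token with raw bytes "3.14" yields (3.14, true) for
+// bitSize 64, while "1e400" overflows float64 and yields (0, false).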
+func (t Token) Float(bitSize int) (float64, bool) { + if t.kind != Number { + return 0, false + } + f, err := strconv.ParseFloat(t.RawString(), bitSize) + if err != nil { + return 0, false + } + return f, true +} + +// Int returns the signed integer number if token is Number. +// +// The given bitSize specifies the integer type that the result must fit into. +// It returns false if the number is not an integer value or if the result +// exceeds the limits for given bitSize. +func (t Token) Int(bitSize int) (int64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseInt(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +// Uint returns the signed integer number if token is Number. +// +// The given bitSize specifies the unsigned integer type that the result must +// fit into. It returns false if the number is not an unsigned integer value +// or if the result exceeds the limits for given bitSize. +func (t Token) Uint(bitSize int) (uint64, bool) { + s, ok := t.getIntStr() + if !ok { + return 0, false + } + n, err := strconv.ParseUint(s, 10, bitSize) + if err != nil { + return 0, false + } + return n, true +} + +func (t Token) getIntStr() (string, bool) { + if t.kind != Number { + return "", false + } + parts, ok := parseNumberParts(t.raw) + if !ok { + return "", false + } + return normalizeToIntString(parts) +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.boo == y.boo && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go new file mode 100644 index 0000000000..934f2dcb39 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/json/encode.go @@ -0,0 +1,278 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// kind represents an encoding type. +type kind uint8 + +const ( + _ kind = (1 << iota) / 2 + name + scalar + objectOpen + objectClose + arrayOpen + arrayClose +) + +// Encoder provides methods to write out JSON constructs and values. The user is +// responsible for producing valid sequences of JSON constructs and values. +type Encoder struct { + indent string + lastKind kind + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry for an Array or Object +// to be preceded by the indent and trailed by a newline. +func NewEncoder(buf []byte, indent string) (*Encoder, error) { + e := &Encoder{ + out: buf, + } + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space or tab characters") + } + e.indent = indent + } + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// WriteNull writes out the null value. +func (e *Encoder) WriteNull() { + e.prepareNext(scalar) + e.out = append(e.out, "null"...) +} + +// WriteBool writes out the given boolean value. 
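+//
+// A minimal usage sketch (illustrative): with no indent configured, the
+// sequence below deterministically produces {"ok":true}, since the random
+// detrand spacing is only ever added around commas:
+//
+//	e, _ := NewEncoder(nil, "")
+//	e.StartObject()
+//	_ = e.WriteName("ok")
+//	e.WriteBool(true)
+//	e.EndObject()
+//	out := e.Bytes() // `{"ok":true}`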
+func (e *Encoder) WriteBool(b bool) { + e.prepareNext(scalar) + if b { + e.out = append(e.out, "true"...) + } else { + e.out = append(e.out, "false"...) + } +} + +// WriteString writes out the given string in JSON string value. Returns error +// if input string contains invalid UTF-8. +func (e *Encoder) WriteString(s string) error { + e.prepareNext(scalar) + var err error + if e.out, err = appendString(e.out, s); err != nil { + return err + } + return nil +} + +// Sentinel error used for indicating invalid UTF-8. +var errInvalidUTF8 = errors.New("invalid UTF-8") + +func appendString(out []byte, in string) ([]byte, error) { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + return out, errInvalidUTF8 + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\b': + out = append(out, 'b') + case '\f': + out = append(out, 'f') + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out, nil +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i, r := range s { + if r < ' ' || r == '\\' || r == '"' || r == utf8.RuneError { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float and bitSize in JSON number value. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +// appendFloat formats given float in bitSize, and appends to the given []byte. +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, `"NaN"`...) + case math.IsInf(n, +1): + return append(out, `"Infinity"`...) + case math.IsInf(n, -1): + return append(out, `"-Infinity"`...) + } + + // JSON number formatting logic based on encoding/json. + // See floatEncoder.encode for reference. + fmt := byte('f') + if abs := math.Abs(n); abs != 0 { + if bitSize == 64 && (abs < 1e-6 || abs >= 1e21) || + bitSize == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { + fmt = 'e' + } + } + out = strconv.AppendFloat(out, n, fmt, -1, bitSize) + if fmt == 'e' { + n := len(out) + if n >= 4 && out[n-4] == 'e' && out[n-3] == '-' && out[n-2] == '0' { + out[n-2] = out[n-1] + out = out[:n-1] + } + } + return out +} + +// WriteInt writes out the given signed integer in JSON number value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = strconv.AppendInt(e.out, n, 10) +} + +// WriteUint writes out the given unsigned integer in JSON number value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = strconv.AppendUint(e.out, n, 10) +} + +// StartObject writes out the '{' symbol. +func (e *Encoder) StartObject() { + e.prepareNext(objectOpen) + e.out = append(e.out, '{') +} + +// EndObject writes out the '}' symbol. 
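+//
+// When an indent is configured and the object is non-empty, prepareNext also
+// drops one indentation level before the closing brace is written.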
+func (e *Encoder) EndObject() { + e.prepareNext(objectClose) + e.out = append(e.out, '}') +} + +// WriteName writes out the given string in JSON string value and the name +// separator ':'. Returns error if input string contains invalid UTF-8, which +// should not be likely as protobuf field names should be valid. +func (e *Encoder) WriteName(s string) error { + e.prepareNext(name) + var err error + // Append to output regardless of error. + e.out, err = appendString(e.out, s) + e.out = append(e.out, ':') + return err +} + +// StartArray writes out the '[' symbol. +func (e *Encoder) StartArray() { + e.prepareNext(arrayOpen) + e.out = append(e.out, '[') +} + +// EndArray writes out the ']' symbol. +func (e *Encoder) EndArray() { + e.prepareNext(arrayClose) + e.out = append(e.out, ']') +} + +// prepareNext adds possible comma and indentation for the next value based +// on last type and indent option. It also updates lastKind to next. +func (e *Encoder) prepareNext(next kind) { + defer func() { + // Set lastKind to next. + e.lastKind = next + }() + + if len(e.indent) == 0 { + // Need to add comma on the following condition. + if e.lastKind&(scalar|objectClose|arrayClose) != 0 && + next&(name|scalar|objectOpen|arrayOpen) != 0 { + e.out = append(e.out, ',') + // For single-line output, add a random extra space after each + // comma to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + switch { + case e.lastKind&(objectOpen|arrayOpen) != 0: + // If next type is NOT closing, add indent and newline. + if next&(objectClose|arrayClose) == 0 { + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } + + case e.lastKind&(scalar|objectClose|arrayClose) != 0: + switch { + // If next type is either a value or name, add comma and newline. + case next&(name|scalar|objectOpen|arrayOpen) != 0: + e.out = append(e.out, ',', '\n') + + // If next type is a closing object or array, adjust indentation. + case next&(objectClose|arrayClose) != 0: + e.indents = e.indents[:len(e.indents)-len(e.indent)] + e.out = append(e.out, '\n') + } + e.out = append(e.out, e.indents...) + + case e.lastKind&name != 0: + e.out = append(e.out, ' ') + // For multi-line output, add a random extra space after key: to make + // output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } +} diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh old mode 100755 new mode 100644 diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh old mode 100755 new mode 100644 diff --git a/vendor/k8s.io/cri-api/LICENSE b/vendor/k8s.io/cri-api/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/k8s.io/cri-api/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
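A short sketch of how the token API of the vendored JSON decoder above composes. This is illustrative only, written as if it lived in the same package, since google.golang.org/protobuf/internal/encoding/json is internal and cannot be imported from outside the protobuf module; the walk logic uses only functions present in the hunks above.

package json

import "fmt"

// walkTokens prints every token in b until EOF, using only the Decoder
// API from decode.go and decode_token.go above (illustrative helper, not
// part of the vendored files).
func walkTokens(b []byte) error {
	d := NewDecoder(b)
	for {
		tok, err := d.Read()
		if err != nil {
			return err
		}
		switch tok.Kind() {
		case EOF:
			return nil
		case Name:
			fmt.Println("name:", tok.Name())
		case Number:
			// Int succeeds for integral values such as "1e2" (normalized
			// to 100); otherwise fall back to Float.
			if n, ok := tok.Int(64); ok {
				fmt.Println("int:", n)
			} else if f, ok := tok.Float(64); ok {
				fmt.Println("float:", f)
			}
		case String:
			fmt.Println("string:", tok.ParsedString())
		default:
			fmt.Println("token:", tok.Kind())
		}
	}
}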
diff --git a/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go b/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go new file mode 100644 index 0000000000..36e4eebacd --- /dev/null +++ b/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.pb.go @@ -0,0 +1,47530 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: api.proto + +package v1 + +import ( + context "context" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Protocol int32 + +const ( + Protocol_TCP Protocol = 0 + Protocol_UDP Protocol = 1 + Protocol_SCTP Protocol = 2 +) + +var Protocol_name = map[int32]string{ + 0: "TCP", + 1: "UDP", + 2: "SCTP", +} + +var Protocol_value = map[string]int32{ + "TCP": 0, + "UDP": 1, + "SCTP": 2, +} + +func (x Protocol) String() string { + return proto.EnumName(Protocol_name, int32(x)) +} + +func (Protocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{0} +} + +type MountPropagation int32 + +const ( + // No mount propagation ("rprivate" in Linux terminology). + MountPropagation_PROPAGATION_PRIVATE MountPropagation = 0 + // Mounts get propagated from the host to the container ("rslave" in Linux). + MountPropagation_PROPAGATION_HOST_TO_CONTAINER MountPropagation = 1 + // Mounts get propagated from the host to the container and from the + // container to the host ("rshared" in Linux). + MountPropagation_PROPAGATION_BIDIRECTIONAL MountPropagation = 2 +) + +var MountPropagation_name = map[int32]string{ + 0: "PROPAGATION_PRIVATE", + 1: "PROPAGATION_HOST_TO_CONTAINER", + 2: "PROPAGATION_BIDIRECTIONAL", +} + +var MountPropagation_value = map[string]int32{ + "PROPAGATION_PRIVATE": 0, + "PROPAGATION_HOST_TO_CONTAINER": 1, + "PROPAGATION_BIDIRECTIONAL": 2, +} + +func (x MountPropagation) String() string { + return proto.EnumName(MountPropagation_name, int32(x)) +} + +func (MountPropagation) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{1} +} + +// A NamespaceMode describes the intended namespace configuration for each +// of the namespaces (Network, PID, IPC) in NamespaceOption. Runtimes should +// map these modes as appropriate for the technology underlying the runtime. 
+type NamespaceMode int32 + +const ( + // A POD namespace is common to all containers in a pod. + // For example, a container with a PID namespace of POD expects to view + // all of the processes in all of the containers in the pod. + NamespaceMode_POD NamespaceMode = 0 + // A CONTAINER namespace is restricted to a single container. + // For example, a container with a PID namespace of CONTAINER expects to + // view only the processes in that container. + NamespaceMode_CONTAINER NamespaceMode = 1 + // A NODE namespace is the namespace of the Kubernetes node. + // For example, a container with a PID namespace of NODE expects to view + // all of the processes on the host running the kubelet. + NamespaceMode_NODE NamespaceMode = 2 + // TARGET targets the namespace of another container. When this is specified, + // a target_id must be specified in NamespaceOption and refer to a container + // previously created with NamespaceMode CONTAINER. This containers namespace + // will be made to match that of container target_id. + // For example, a container with a PID namespace of TARGET expects to view + // all of the processes that container target_id can view. + NamespaceMode_TARGET NamespaceMode = 3 +) + +var NamespaceMode_name = map[int32]string{ + 0: "POD", + 1: "CONTAINER", + 2: "NODE", + 3: "TARGET", +} + +var NamespaceMode_value = map[string]int32{ + "POD": 0, + "CONTAINER": 1, + "NODE": 2, + "TARGET": 3, +} + +func (x NamespaceMode) String() string { + return proto.EnumName(NamespaceMode_name, int32(x)) +} + +func (NamespaceMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{2} +} + +type PodSandboxState int32 + +const ( + PodSandboxState_SANDBOX_READY PodSandboxState = 0 + PodSandboxState_SANDBOX_NOTREADY PodSandboxState = 1 +) + +var PodSandboxState_name = map[int32]string{ + 0: "SANDBOX_READY", + 1: "SANDBOX_NOTREADY", +} + +var PodSandboxState_value = map[string]int32{ + "SANDBOX_READY": 0, + "SANDBOX_NOTREADY": 1, +} + +func (x PodSandboxState) String() string { + return proto.EnumName(PodSandboxState_name, int32(x)) +} + +func (PodSandboxState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{3} +} + +type ContainerState int32 + +const ( + ContainerState_CONTAINER_CREATED ContainerState = 0 + ContainerState_CONTAINER_RUNNING ContainerState = 1 + ContainerState_CONTAINER_EXITED ContainerState = 2 + ContainerState_CONTAINER_UNKNOWN ContainerState = 3 +) + +var ContainerState_name = map[int32]string{ + 0: "CONTAINER_CREATED", + 1: "CONTAINER_RUNNING", + 2: "CONTAINER_EXITED", + 3: "CONTAINER_UNKNOWN", +} + +var ContainerState_value = map[string]int32{ + "CONTAINER_CREATED": 0, + "CONTAINER_RUNNING": 1, + "CONTAINER_EXITED": 2, + "CONTAINER_UNKNOWN": 3, +} + +func (x ContainerState) String() string { + return proto.EnumName(ContainerState_name, int32(x)) +} + +func (ContainerState) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{4} +} + +type ContainerEventType int32 + +const ( + // Container created + ContainerEventType_CONTAINER_CREATED_EVENT ContainerEventType = 0 + // Container started + ContainerEventType_CONTAINER_STARTED_EVENT ContainerEventType = 1 + // Container stopped + ContainerEventType_CONTAINER_STOPPED_EVENT ContainerEventType = 2 + // Container deleted + ContainerEventType_CONTAINER_DELETED_EVENT ContainerEventType = 3 +) + +var ContainerEventType_name = map[int32]string{ + 0: "CONTAINER_CREATED_EVENT", + 1: "CONTAINER_STARTED_EVENT", + 2: "CONTAINER_STOPPED_EVENT", + 
3: "CONTAINER_DELETED_EVENT", +} + +var ContainerEventType_value = map[string]int32{ + "CONTAINER_CREATED_EVENT": 0, + "CONTAINER_STARTED_EVENT": 1, + "CONTAINER_STOPPED_EVENT": 2, + "CONTAINER_DELETED_EVENT": 3, +} + +func (x ContainerEventType) String() string { + return proto.EnumName(ContainerEventType_name, int32(x)) +} + +func (ContainerEventType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{5} +} + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", +} + +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, +} + +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} + +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{6} +} + +type CgroupDriver int32 + +const ( + CgroupDriver_SYSTEMD CgroupDriver = 0 + CgroupDriver_CGROUPFS CgroupDriver = 1 +) + +var CgroupDriver_name = map[int32]string{ + 0: "SYSTEMD", + 1: "CGROUPFS", +} + +var CgroupDriver_value = map[string]int32{ + "SYSTEMD": 0, + "CGROUPFS": 1, +} + +func (x CgroupDriver) String() string { + return proto.EnumName(CgroupDriver_name, int32(x)) +} + +func (CgroupDriver) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{7} +} + +// Available profile types. +type SecurityProfile_ProfileType int32 + +const ( + // The container runtime default profile should be used. + SecurityProfile_RuntimeDefault SecurityProfile_ProfileType = 0 + // Disable the feature for the sandbox or the container. + SecurityProfile_Unconfined SecurityProfile_ProfileType = 1 + // A pre-defined profile on the node should be used. + SecurityProfile_Localhost SecurityProfile_ProfileType = 2 +) + +var SecurityProfile_ProfileType_name = map[int32]string{ + 0: "RuntimeDefault", + 1: "Unconfined", + 2: "Localhost", +} + +var SecurityProfile_ProfileType_value = map[string]int32{ + "RuntimeDefault": 0, + "Unconfined": 1, + "Localhost": 2, +} + +func (x SecurityProfile_ProfileType) String() string { + return proto.EnumName(SecurityProfile_ProfileType_name, int32(x)) +} + +func (SecurityProfile_ProfileType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{10, 0} +} + +type VersionRequest struct { + // Version of the kubelet runtime API. 
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionRequest) Reset() { *m = VersionRequest{} } +func (*VersionRequest) ProtoMessage() {} +func (*VersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{0} +} +func (m *VersionRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VersionRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VersionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionRequest.Merge(m, src) +} +func (m *VersionRequest) XXX_Size() int { + return m.Size() +} +func (m *VersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_VersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionRequest proto.InternalMessageInfo + +func (m *VersionRequest) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +type VersionResponse struct { + // Version of the kubelet runtime API. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Name of the container runtime. + RuntimeName string `protobuf:"bytes,2,opt,name=runtime_name,json=runtimeName,proto3" json:"runtime_name,omitempty"` + // Version of the container runtime. The string must be + // semver-compatible. + RuntimeVersion string `protobuf:"bytes,3,opt,name=runtime_version,json=runtimeVersion,proto3" json:"runtime_version,omitempty"` + // API version of the container runtime. The string must be + // semver-compatible. + RuntimeApiVersion string `protobuf:"bytes,4,opt,name=runtime_api_version,json=runtimeApiVersion,proto3" json:"runtime_api_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VersionResponse) Reset() { *m = VersionResponse{} } +func (*VersionResponse) ProtoMessage() {} +func (*VersionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{1} +} +func (m *VersionResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VersionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_VersionResponse.Merge(m, src) +} +func (m *VersionResponse) XXX_Size() int { + return m.Size() +} +func (m *VersionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_VersionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_VersionResponse proto.InternalMessageInfo + +func (m *VersionResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *VersionResponse) GetRuntimeName() string { + if m != nil { + return m.RuntimeName + } + return "" +} + +func (m *VersionResponse) GetRuntimeVersion() string { + if m != nil { + return m.RuntimeVersion + } + return "" +} + +func (m *VersionResponse) GetRuntimeApiVersion() string { + if m != nil { + return m.RuntimeApiVersion + } + return "" +} + +// DNSConfig specifies the DNS servers and search domains of a sandbox. 
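+// In resolv.conf terms, Servers, Searches and Options correspond to the
+// nameserver, search and options lines respectively (e.g. servers
+// ["10.0.0.10"], searches ["svc.cluster.local"], options ["ndots:5"]).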
+type DNSConfig struct { + // List of DNS servers of the cluster. + Servers []string `protobuf:"bytes,1,rep,name=servers,proto3" json:"servers,omitempty"` + // List of DNS search domains of the cluster. + Searches []string `protobuf:"bytes,2,rep,name=searches,proto3" json:"searches,omitempty"` + // List of DNS options. See https://linux.die.net/man/5/resolv.conf + // for all available options. + Options []string `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DNSConfig) Reset() { *m = DNSConfig{} } +func (*DNSConfig) ProtoMessage() {} +func (*DNSConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{2} +} +func (m *DNSConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DNSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DNSConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DNSConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_DNSConfig.Merge(m, src) +} +func (m *DNSConfig) XXX_Size() int { + return m.Size() +} +func (m *DNSConfig) XXX_DiscardUnknown() { + xxx_messageInfo_DNSConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_DNSConfig proto.InternalMessageInfo + +func (m *DNSConfig) GetServers() []string { + if m != nil { + return m.Servers + } + return nil +} + +func (m *DNSConfig) GetSearches() []string { + if m != nil { + return m.Searches + } + return nil +} + +func (m *DNSConfig) GetOptions() []string { + if m != nil { + return m.Options + } + return nil +} + +// PortMapping specifies the port mapping configurations of a sandbox. +type PortMapping struct { + // Protocol of the port mapping. + Protocol Protocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=runtime.v1.Protocol" json:"protocol,omitempty"` + // Port number within the container. Default: 0 (not specified). + ContainerPort int32 `protobuf:"varint,2,opt,name=container_port,json=containerPort,proto3" json:"container_port,omitempty"` + // Port number on the host. Default: 0 (not specified). + HostPort int32 `protobuf:"varint,3,opt,name=host_port,json=hostPort,proto3" json:"host_port,omitempty"` + // Host IP. 
+ HostIp string `protobuf:"bytes,4,opt,name=host_ip,json=hostIp,proto3" json:"host_ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PortMapping) Reset() { *m = PortMapping{} } +func (*PortMapping) ProtoMessage() {} +func (*PortMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{3} +} +func (m *PortMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PortMapping.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PortMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortMapping.Merge(m, src) +} +func (m *PortMapping) XXX_Size() int { + return m.Size() +} +func (m *PortMapping) XXX_DiscardUnknown() { + xxx_messageInfo_PortMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_PortMapping proto.InternalMessageInfo + +func (m *PortMapping) GetProtocol() Protocol { + if m != nil { + return m.Protocol + } + return Protocol_TCP +} + +func (m *PortMapping) GetContainerPort() int32 { + if m != nil { + return m.ContainerPort + } + return 0 +} + +func (m *PortMapping) GetHostPort() int32 { + if m != nil { + return m.HostPort + } + return 0 +} + +func (m *PortMapping) GetHostIp() string { + if m != nil { + return m.HostIp + } + return "" +} + +// Mount specifies a host volume to mount into a container. +type Mount struct { + // Path of the mount within the container. + ContainerPath string `protobuf:"bytes,1,opt,name=container_path,json=containerPath,proto3" json:"container_path,omitempty"` + // Path of the mount on the host. If the hostPath doesn't exist, then runtimes + // should report error. If the hostpath is a symbolic link, runtimes should + // follow the symlink and mount the real destination to container. + HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` + // If set, the mount is read-only. + Readonly bool `protobuf:"varint,3,opt,name=readonly,proto3" json:"readonly,omitempty"` + // If set, the mount needs SELinux relabeling. + SelinuxRelabel bool `protobuf:"varint,4,opt,name=selinux_relabel,json=selinuxRelabel,proto3" json:"selinux_relabel,omitempty"` + // Requested propagation mode. + Propagation MountPropagation `protobuf:"varint,5,opt,name=propagation,proto3,enum=runtime.v1.MountPropagation" json:"propagation,omitempty"` + // UidMappings specifies the runtime UID mappings for the mount. + UidMappings []*IDMapping `protobuf:"bytes,6,rep,name=uidMappings,proto3" json:"uidMappings,omitempty"` + // GidMappings specifies the runtime GID mappings for the mount. 
+ GidMappings []*IDMapping `protobuf:"bytes,7,rep,name=gidMappings,proto3" json:"gidMappings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{4} +} +func (m *Mount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mount.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Mount) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mount.Merge(m, src) +} +func (m *Mount) XXX_Size() int { + return m.Size() +} +func (m *Mount) XXX_DiscardUnknown() { + xxx_messageInfo_Mount.DiscardUnknown(m) +} + +var xxx_messageInfo_Mount proto.InternalMessageInfo + +func (m *Mount) GetContainerPath() string { + if m != nil { + return m.ContainerPath + } + return "" +} + +func (m *Mount) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *Mount) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *Mount) GetSelinuxRelabel() bool { + if m != nil { + return m.SelinuxRelabel + } + return false +} + +func (m *Mount) GetPropagation() MountPropagation { + if m != nil { + return m.Propagation + } + return MountPropagation_PROPAGATION_PRIVATE +} + +func (m *Mount) GetUidMappings() []*IDMapping { + if m != nil { + return m.UidMappings + } + return nil +} + +func (m *Mount) GetGidMappings() []*IDMapping { + if m != nil { + return m.GidMappings + } + return nil +} + +// IDMapping describes host to container ID mappings for a pod sandbox. +type IDMapping struct { + // HostId is the id on the host. + HostId uint32 `protobuf:"varint,1,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"` + // ContainerId is the id in the container. + ContainerId uint32 `protobuf:"varint,2,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Length is the size of the range to map. 
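+	// For example, host_id 100000, container_id 0, length 65536 maps
+	// container IDs 0-65535 onto host IDs 100000-165535.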
+ Length uint32 `protobuf:"varint,3,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IDMapping) Reset() { *m = IDMapping{} } +func (*IDMapping) ProtoMessage() {} +func (*IDMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{5} +} +func (m *IDMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_IDMapping.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *IDMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDMapping.Merge(m, src) +} +func (m *IDMapping) XXX_Size() int { + return m.Size() +} +func (m *IDMapping) XXX_DiscardUnknown() { + xxx_messageInfo_IDMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_IDMapping proto.InternalMessageInfo + +func (m *IDMapping) GetHostId() uint32 { + if m != nil { + return m.HostId + } + return 0 +} + +func (m *IDMapping) GetContainerId() uint32 { + if m != nil { + return m.ContainerId + } + return 0 +} + +func (m *IDMapping) GetLength() uint32 { + if m != nil { + return m.Length + } + return 0 +} + +// UserNamespace describes the intended user namespace configuration for a pod sandbox. +type UserNamespace struct { + // Mode is the NamespaceMode for this UserNamespace. + // Note: NamespaceMode for UserNamespace currently supports only POD and NODE, not CONTAINER OR TARGET. + Mode NamespaceMode `protobuf:"varint,1,opt,name=mode,proto3,enum=runtime.v1.NamespaceMode" json:"mode,omitempty"` + // Uids specifies the UID mappings for the user namespace. + Uids []*IDMapping `protobuf:"bytes,2,rep,name=uids,proto3" json:"uids,omitempty"` + // Gids specifies the GID mappings for the user namespace. + Gids []*IDMapping `protobuf:"bytes,3,rep,name=gids,proto3" json:"gids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserNamespace) Reset() { *m = UserNamespace{} } +func (*UserNamespace) ProtoMessage() {} +func (*UserNamespace) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{6} +} +func (m *UserNamespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UserNamespace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UserNamespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserNamespace.Merge(m, src) +} +func (m *UserNamespace) XXX_Size() int { + return m.Size() +} +func (m *UserNamespace) XXX_DiscardUnknown() { + xxx_messageInfo_UserNamespace.DiscardUnknown(m) +} + +var xxx_messageInfo_UserNamespace proto.InternalMessageInfo + +func (m *UserNamespace) GetMode() NamespaceMode { + if m != nil { + return m.Mode + } + return NamespaceMode_POD +} + +func (m *UserNamespace) GetUids() []*IDMapping { + if m != nil { + return m.Uids + } + return nil +} + +func (m *UserNamespace) GetGids() []*IDMapping { + if m != nil { + return m.Gids + } + return nil +} + +// NamespaceOption provides options for Linux namespaces. +type NamespaceOption struct { + // Network namespace for this container/sandbox. 
+ // Note: There is currently no way to set CONTAINER scoped network in the Kubernetes API. + // Namespaces currently set by the kubelet: POD, NODE + Network NamespaceMode `protobuf:"varint,1,opt,name=network,proto3,enum=runtime.v1.NamespaceMode" json:"network,omitempty"` + // PID namespace for this container/sandbox. + // Note: The CRI default is POD, but the v1.PodSpec default is CONTAINER. + // The kubelet's runtime manager will set this to CONTAINER explicitly for v1 pods. + // Namespaces currently set by the kubelet: POD, CONTAINER, NODE, TARGET + Pid NamespaceMode `protobuf:"varint,2,opt,name=pid,proto3,enum=runtime.v1.NamespaceMode" json:"pid,omitempty"` + // IPC namespace for this container/sandbox. + // Note: There is currently no way to set CONTAINER scoped IPC in the Kubernetes API. + // Namespaces currently set by the kubelet: POD, NODE + Ipc NamespaceMode `protobuf:"varint,3,opt,name=ipc,proto3,enum=runtime.v1.NamespaceMode" json:"ipc,omitempty"` + // Target Container ID for NamespaceMode of TARGET. This container must have been + // previously created in the same pod. It is not possible to specify different targets + // for each namespace. + TargetId string `protobuf:"bytes,4,opt,name=target_id,json=targetId,proto3" json:"target_id,omitempty"` + // UsernsOptions for this pod sandbox. + // The Kubelet picks the user namespace configuration to use for the pod sandbox. The mappings + // are specified as part of the UserNamespace struct. If the struct is nil, then the POD mode + // must be assumed. This is done for backward compatibility with older Kubelet versions that + // do not set a user namespace. + UsernsOptions *UserNamespace `protobuf:"bytes,5,opt,name=userns_options,json=usernsOptions,proto3" json:"userns_options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NamespaceOption) Reset() { *m = NamespaceOption{} } +func (*NamespaceOption) ProtoMessage() {} +func (*NamespaceOption) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{7} +} +func (m *NamespaceOption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NamespaceOption.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NamespaceOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceOption.Merge(m, src) +} +func (m *NamespaceOption) XXX_Size() int { + return m.Size() +} +func (m *NamespaceOption) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceOption.DiscardUnknown(m) +} + +var xxx_messageInfo_NamespaceOption proto.InternalMessageInfo + +func (m *NamespaceOption) GetNetwork() NamespaceMode { + if m != nil { + return m.Network + } + return NamespaceMode_POD +} + +func (m *NamespaceOption) GetPid() NamespaceMode { + if m != nil { + return m.Pid + } + return NamespaceMode_POD +} + +func (m *NamespaceOption) GetIpc() NamespaceMode { + if m != nil { + return m.Ipc + } + return NamespaceMode_POD +} + +func (m *NamespaceOption) GetTargetId() string { + if m != nil { + return m.TargetId + } + return "" +} + +func (m *NamespaceOption) GetUsernsOptions() *UserNamespace { + if m != nil { + return m.UsernsOptions + } + return nil +} + +// Int64Value is the wrapper of int64. +type Int64Value struct { + // The value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{8} +} +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) +} +func (m *Int64Value) XXX_Size() int { + return m.Size() +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// LinuxSandboxSecurityContext holds linux security configuration that will be +// applied to a sandbox. Note that: +// 1. It does not apply to containers in the pods. +// 2. It may not be applicable to a PodSandbox which does not contain any running +// process. +type LinuxSandboxSecurityContext struct { + // Configurations for the sandbox's namespaces. + // This will be used only if the PodSandbox uses namespace for isolation. + NamespaceOptions *NamespaceOption `protobuf:"bytes,1,opt,name=namespace_options,json=namespaceOptions,proto3" json:"namespace_options,omitempty"` + // Optional SELinux context to be applied. + SelinuxOptions *SELinuxOption `protobuf:"bytes,2,opt,name=selinux_options,json=selinuxOptions,proto3" json:"selinux_options,omitempty"` + // UID to run sandbox processes as, when applicable. + RunAsUser *Int64Value `protobuf:"bytes,3,opt,name=run_as_user,json=runAsUser,proto3" json:"run_as_user,omitempty"` + // GID to run sandbox processes as, when applicable. run_as_group should only + // be specified when run_as_user is specified; otherwise, the runtime MUST error. + RunAsGroup *Int64Value `protobuf:"bytes,8,opt,name=run_as_group,json=runAsGroup,proto3" json:"run_as_group,omitempty"` + // If set, the root filesystem of the sandbox is read-only. + ReadonlyRootfs bool `protobuf:"varint,4,opt,name=readonly_rootfs,json=readonlyRootfs,proto3" json:"readonly_rootfs,omitempty"` + // List of groups applied to the first process run in the sandbox, in + // addition to the sandbox's primary GID, and group memberships defined + // in the container image for the sandbox's primary UID of the container process. + // If the list is empty, no additional groups are added to any container. + // Note that group memberships defined in the container image for the sandbox's primary UID + // of the container process are still effective, even if they are not included in this list. + SupplementalGroups []int64 `protobuf:"varint,5,rep,packed,name=supplemental_groups,json=supplementalGroups,proto3" json:"supplemental_groups,omitempty"` + // Indicates whether the sandbox will be asked to run a privileged + // container. If a privileged container is to be executed within it, this + // MUST be true. + // This allows a sandbox to take additional security precautions if no + // privileged containers are expected to be run. 
+	Privileged bool `protobuf:"varint,6,opt,name=privileged,proto3" json:"privileged,omitempty"`
+	// Seccomp profile for the sandbox.
+	Seccomp *SecurityProfile `protobuf:"bytes,9,opt,name=seccomp,proto3" json:"seccomp,omitempty"`
+	// AppArmor profile for the sandbox.
+	Apparmor *SecurityProfile `protobuf:"bytes,10,opt,name=apparmor,proto3" json:"apparmor,omitempty"`
+	// Seccomp profile for the sandbox, candidate values are:
+	//   - runtime/default: the default profile for the container runtime
+	//   - unconfined: unconfined profile, ie, no seccomp sandboxing
+	//   - localhost/<full-path-to-profile>: the profile installed on the node.
+	//     <full-path-to-profile> is the full path of the profile.
+	//
+	// Default: "", which is identical with unconfined.
+	SeccompProfilePath string `protobuf:"bytes,7,opt,name=seccomp_profile_path,json=seccompProfilePath,proto3" json:"seccomp_profile_path,omitempty"` // Deprecated: Do not use.
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LinuxSandboxSecurityContext) Reset()      { *m = LinuxSandboxSecurityContext{} }
+func (*LinuxSandboxSecurityContext) ProtoMessage() {}
+func (*LinuxSandboxSecurityContext) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{9}
+}
+func (m *LinuxSandboxSecurityContext) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LinuxSandboxSecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LinuxSandboxSecurityContext.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LinuxSandboxSecurityContext) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LinuxSandboxSecurityContext.Merge(m, src)
+}
+func (m *LinuxSandboxSecurityContext) XXX_Size() int {
+	return m.Size()
+}
+func (m *LinuxSandboxSecurityContext) XXX_DiscardUnknown() {
+	xxx_messageInfo_LinuxSandboxSecurityContext.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LinuxSandboxSecurityContext proto.InternalMessageInfo
+
+func (m *LinuxSandboxSecurityContext) GetNamespaceOptions() *NamespaceOption {
+	if m != nil {
+		return m.NamespaceOptions
+	}
+	return nil
+}
+
+func (m *LinuxSandboxSecurityContext) GetSelinuxOptions() *SELinuxOption {
+	if m != nil {
+		return m.SelinuxOptions
+	}
+	return nil
+}
+
+func (m *LinuxSandboxSecurityContext) GetRunAsUser() *Int64Value {
+	if m != nil {
+		return m.RunAsUser
+	}
+	return nil
+}
+
+func (m *LinuxSandboxSecurityContext) GetRunAsGroup() *Int64Value {
+	if m != nil {
+		return m.RunAsGroup
+	}
+	return nil
+}
+
+func (m *LinuxSandboxSecurityContext) GetReadonlyRootfs() bool {
+	if m != nil {
+		return m.ReadonlyRootfs
+	}
+	return false
+}
+
+func (m *LinuxSandboxSecurityContext) GetSupplementalGroups() []int64 {
+	if m != nil {
+		return m.SupplementalGroups
+	}
+	return nil
+}
+
+func (m *LinuxSandboxSecurityContext) GetPrivileged() bool {
+	if m != nil {
+		return m.Privileged
+	}
+	return false
+}
+
+func (m *LinuxSandboxSecurityContext) GetSeccomp() *SecurityProfile {
+	if m != nil {
+		return m.Seccomp
+	}
+	return nil
+}
+
+func (m *LinuxSandboxSecurityContext) GetApparmor() *SecurityProfile {
+	if m != nil {
+		return m.Apparmor
+	}
+	return nil
+}
+
+// Deprecated: Do not use.
+func (m *LinuxSandboxSecurityContext) GetSeccompProfilePath() string {
+	if m != nil {
+		return m.SeccompProfilePath
+	}
+	return ""
+}
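+
+// NOTE(editor): exampleLinuxSandboxSecurityContext is an illustrative sketch
+// added for review; it is not part of the upstream generated file. It shows
+// how a caller might populate the sandbox security context defined above,
+// using the enum constants generated earlier in this file. All values are
+// hypothetical.
+func exampleLinuxSandboxSecurityContext() *LinuxSandboxSecurityContext {
+	return &LinuxSandboxSecurityContext{
+		// Share the pod's network namespace; give each container its own PID namespace.
+		NamespaceOptions: &NamespaceOption{
+			Network: NamespaceMode_POD,
+			Pid:     NamespaceMode_CONTAINER,
+		},
+		// run_as_group may only be set together with run_as_user.
+		RunAsUser:      &Int64Value{Value: 1000},
+		RunAsGroup:     &Int64Value{Value: 1000},
+		ReadonlyRootfs: true,
+		// Prefer the Seccomp field; SeccompProfilePath is deprecated.
+		Seccomp: &SecurityProfile{ProfileType: SecurityProfile_RuntimeDefault},
+	}
+}
+
+// A security profile which can be used for sandboxes and containers.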
+type SecurityProfile struct {
+	// Indicator which `ProfileType` should be applied.
+	ProfileType SecurityProfile_ProfileType `protobuf:"varint,1,opt,name=profile_type,json=profileType,proto3,enum=runtime.v1.SecurityProfile_ProfileType" json:"profile_type,omitempty"`
+	// Indicates that a pre-defined profile on the node should be used.
+	// Must only be set if `ProfileType` is `Localhost`.
+	// For seccomp, it must be an absolute path to the seccomp profile.
+	// For AppArmor, this field is the AppArmor `<profile name>`
+	LocalhostRef string `protobuf:"bytes,2,opt,name=localhost_ref,json=localhostRef,proto3" json:"localhost_ref,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SecurityProfile) Reset()      { *m = SecurityProfile{} }
+func (*SecurityProfile) ProtoMessage() {}
+func (*SecurityProfile) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{10}
+}
+func (m *SecurityProfile) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SecurityProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SecurityProfile.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SecurityProfile) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SecurityProfile.Merge(m, src)
+}
+func (m *SecurityProfile) XXX_Size() int {
+	return m.Size()
+}
+func (m *SecurityProfile) XXX_DiscardUnknown() {
+	xxx_messageInfo_SecurityProfile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SecurityProfile proto.InternalMessageInfo
+
+func (m *SecurityProfile) GetProfileType() SecurityProfile_ProfileType {
+	if m != nil {
+		return m.ProfileType
+	}
+	return SecurityProfile_RuntimeDefault
+}
+
+func (m *SecurityProfile) GetLocalhostRef() string {
+	if m != nil {
+		return m.LocalhostRef
+	}
+	return ""
+}
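+
+// NOTE(editor): illustrative sketch only, not part of the upstream generated
+// file. A Localhost profile must carry a LocalhostRef; for seccomp that ref
+// is an absolute path on the node, as documented above. The path below is
+// hypothetical.
+func exampleLocalhostSeccompProfile() *SecurityProfile {
+	return &SecurityProfile{
+		ProfileType:  SecurityProfile_Localhost,
+		LocalhostRef: "/var/lib/kubelet/seccomp/profiles/audit.json",
+	}
+}
+
+// LinuxPodSandboxConfig holds platform-specific configurations for Linux
+// host platforms and Linux-based containers.
+type LinuxPodSandboxConfig struct {
+	// Parent cgroup of the PodSandbox.
+	// The cgroupfs style syntax will be used, but the container runtime can
+	// convert it to systemd semantics if needed.
+	CgroupParent string `protobuf:"bytes,1,opt,name=cgroup_parent,json=cgroupParent,proto3" json:"cgroup_parent,omitempty"`
+	// LinuxSandboxSecurityContext holds sandbox security attributes.
+	SecurityContext *LinuxSandboxSecurityContext `protobuf:"bytes,2,opt,name=security_context,json=securityContext,proto3" json:"security_context,omitempty"`
+	// Sysctls holds linux sysctls config for the sandbox.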
+ Sysctls map[string]string `protobuf:"bytes,3,rep,name=sysctls,proto3" json:"sysctls,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional overhead represents the overheads associated with this sandbox + Overhead *LinuxContainerResources `protobuf:"bytes,4,opt,name=overhead,proto3" json:"overhead,omitempty"` + // Optional resources represents the sum of container resources for this sandbox + Resources *LinuxContainerResources `protobuf:"bytes,5,opt,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LinuxPodSandboxConfig) Reset() { *m = LinuxPodSandboxConfig{} } +func (*LinuxPodSandboxConfig) ProtoMessage() {} +func (*LinuxPodSandboxConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{11} +} +func (m *LinuxPodSandboxConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LinuxPodSandboxConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LinuxPodSandboxConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LinuxPodSandboxConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinuxPodSandboxConfig.Merge(m, src) +} +func (m *LinuxPodSandboxConfig) XXX_Size() int { + return m.Size() +} +func (m *LinuxPodSandboxConfig) XXX_DiscardUnknown() { + xxx_messageInfo_LinuxPodSandboxConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_LinuxPodSandboxConfig proto.InternalMessageInfo + +func (m *LinuxPodSandboxConfig) GetCgroupParent() string { + if m != nil { + return m.CgroupParent + } + return "" +} + +func (m *LinuxPodSandboxConfig) GetSecurityContext() *LinuxSandboxSecurityContext { + if m != nil { + return m.SecurityContext + } + return nil +} + +func (m *LinuxPodSandboxConfig) GetSysctls() map[string]string { + if m != nil { + return m.Sysctls + } + return nil +} + +func (m *LinuxPodSandboxConfig) GetOverhead() *LinuxContainerResources { + if m != nil { + return m.Overhead + } + return nil +} + +func (m *LinuxPodSandboxConfig) GetResources() *LinuxContainerResources { + if m != nil { + return m.Resources + } + return nil +} + +// PodSandboxMetadata holds all necessary information for building the sandbox name. +// The container runtime is encouraged to expose the metadata associated with the +// PodSandbox in its user interface for better user experience. For example, +// the runtime can construct a unique PodSandboxName based on the metadata. +type PodSandboxMetadata struct { + // Pod name of the sandbox. Same as the pod name in the Pod ObjectMeta. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Pod UID of the sandbox. Same as the pod UID in the Pod ObjectMeta. + Uid string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"` + // Pod namespace of the sandbox. Same as the pod namespace in the Pod ObjectMeta. + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + // Attempt number of creating the sandbox. Default: 0. 
+	Attempt              uint32   `protobuf:"varint,4,opt,name=attempt,proto3" json:"attempt,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PodSandboxMetadata) Reset()      { *m = PodSandboxMetadata{} }
+func (*PodSandboxMetadata) ProtoMessage() {}
+func (*PodSandboxMetadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{12}
+}
+func (m *PodSandboxMetadata) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PodSandboxMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PodSandboxMetadata.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PodSandboxMetadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PodSandboxMetadata.Merge(m, src)
+}
+func (m *PodSandboxMetadata) XXX_Size() int {
+	return m.Size()
+}
+func (m *PodSandboxMetadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_PodSandboxMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PodSandboxMetadata proto.InternalMessageInfo
+
+func (m *PodSandboxMetadata) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *PodSandboxMetadata) GetUid() string {
+	if m != nil {
+		return m.Uid
+	}
+	return ""
+}
+
+func (m *PodSandboxMetadata) GetNamespace() string {
+	if m != nil {
+		return m.Namespace
+	}
+	return ""
+}
+
+func (m *PodSandboxMetadata) GetAttempt() uint32 {
+	if m != nil {
+		return m.Attempt
+	}
+	return 0
+}
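+
+// NOTE(editor): illustrative sketch only, not part of the upstream generated
+// file. Metadata uniquely identifies a sandbox; runtimes are encouraged to
+// build a readable name from these fields. All values are hypothetical.
+func examplePodSandboxMetadata() *PodSandboxMetadata {
+	return &PodSandboxMetadata{
+		Name:      "nginx",
+		Namespace: "default",
+		Uid:       "0a1b2c3d-4e5f-6789-abcd-ef0123456789", // hypothetical pod UID
+		Attempt:   0,                                      // first attempt at creating this sandbox
+	}
+}
+
+// PodSandboxConfig holds all the required and optional fields for creating a
+// sandbox.
+type PodSandboxConfig struct {
+	// Metadata of the sandbox. This information will uniquely identify the
+	// sandbox, and the runtime should leverage this to ensure correct
+	// operation. The runtime may also use this information to improve UX, such
+	// as by constructing a readable name.
+	Metadata *PodSandboxMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	// Hostname of the sandbox. Hostname could only be empty when the pod
+	// network namespace is NODE.
+	Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"`
+	// Path to the directory on the host in which container log files are
+	// stored.
+	// By default the log of a container going into the LogDirectory will be
+	// hooked up to STDOUT and STDERR. However, the LogDirectory may contain
+	// binary log files with structured logging data from the individual
+	// containers. For example, the files might be newline separated JSON
+	// structured logs, systemd-journald journal files, gRPC trace files, etc.
+	// E.g.,
+	//
+	//	PodSandboxConfig.LogDirectory = `/var/log/pods/<NAMESPACE>_<NAME>_<UID>/`
+	//	ContainerConfig.LogPath = `containerName/Instance#.log`
+	LogDirectory string `protobuf:"bytes,3,opt,name=log_directory,json=logDirectory,proto3" json:"log_directory,omitempty"`
+	// DNS config for the sandbox.
+	DnsConfig *DNSConfig `protobuf:"bytes,4,opt,name=dns_config,json=dnsConfig,proto3" json:"dns_config,omitempty"`
+	// Port mappings for the sandbox.
+	PortMappings []*PortMapping `protobuf:"bytes,5,rep,name=port_mappings,json=portMappings,proto3" json:"port_mappings,omitempty"`
+	// Key-value pairs that may be used to scope and select individual resources.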
+ Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map that may be set by the kubelet to store and + // retrieve arbitrary metadata. This will include any annotations set on a + // pod through the Kubernetes API. + // + // Annotations MUST NOT be altered by the runtime; the annotations stored + // here MUST be returned in the PodSandboxStatus associated with the pod + // this PodSandboxConfig creates. + // + // In general, in order to preserve a well-defined interface between the + // kubelet and the container runtime, annotations SHOULD NOT influence + // runtime behaviour. + // + // Annotations can also be useful for runtime authors to experiment with + // new features that are opaque to the Kubernetes APIs (both user-facing + // and the CRI). Whenever possible, however, runtime authors SHOULD + // consider proposing new typed fields for any new features instead. + Annotations map[string]string `protobuf:"bytes,7,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Optional configurations specific to Linux hosts. + Linux *LinuxPodSandboxConfig `protobuf:"bytes,8,opt,name=linux,proto3" json:"linux,omitempty"` + // Optional configurations specific to Windows hosts. + Windows *WindowsPodSandboxConfig `protobuf:"bytes,9,opt,name=windows,proto3" json:"windows,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxConfig) Reset() { *m = PodSandboxConfig{} } +func (*PodSandboxConfig) ProtoMessage() {} +func (*PodSandboxConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{13} +} +func (m *PodSandboxConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxConfig.Merge(m, src) +} +func (m *PodSandboxConfig) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxConfig proto.InternalMessageInfo + +func (m *PodSandboxConfig) GetMetadata() *PodSandboxMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PodSandboxConfig) GetHostname() string { + if m != nil { + return m.Hostname + } + return "" +} + +func (m *PodSandboxConfig) GetLogDirectory() string { + if m != nil { + return m.LogDirectory + } + return "" +} + +func (m *PodSandboxConfig) GetDnsConfig() *DNSConfig { + if m != nil { + return m.DnsConfig + } + return nil +} + +func (m *PodSandboxConfig) GetPortMappings() []*PortMapping { + if m != nil { + return m.PortMappings + } + return nil +} + +func (m *PodSandboxConfig) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *PodSandboxConfig) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *PodSandboxConfig) GetLinux() *LinuxPodSandboxConfig { + if m != nil { + return m.Linux + } + return nil +} + +func 
(m *PodSandboxConfig) GetWindows() *WindowsPodSandboxConfig {
+	if m != nil {
+		return m.Windows
+	}
+	return nil
+}
+
+type RunPodSandboxRequest struct {
+	// Configuration for creating a PodSandbox.
+	Config *PodSandboxConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
+	// Named runtime configuration to use for this PodSandbox.
+	// If the runtime handler is unknown, this request should be rejected. An
+	// empty string should select the default handler, equivalent to the
+	// behavior before this feature was added.
+	// See https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+	RuntimeHandler string `protobuf:"bytes,2,opt,name=runtime_handler,json=runtimeHandler,proto3" json:"runtime_handler,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RunPodSandboxRequest) Reset()      { *m = RunPodSandboxRequest{} }
+func (*RunPodSandboxRequest) ProtoMessage() {}
+func (*RunPodSandboxRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{14}
+}
+func (m *RunPodSandboxRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RunPodSandboxRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RunPodSandboxRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RunPodSandboxRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RunPodSandboxRequest.Merge(m, src)
+}
+func (m *RunPodSandboxRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *RunPodSandboxRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_RunPodSandboxRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RunPodSandboxRequest proto.InternalMessageInfo
+
+func (m *RunPodSandboxRequest) GetConfig() *PodSandboxConfig {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+func (m *RunPodSandboxRequest) GetRuntimeHandler() string {
+	if m != nil {
+		return m.RuntimeHandler
+	}
+	return ""
+}
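+
+// NOTE(editor): illustrative sketch only, not part of the upstream generated
+// file. A minimal RunPodSandboxRequest wiring together the config types
+// defined above; the empty RuntimeHandler selects the default handler. All
+// values are hypothetical, reusing the editor example helpers above.
+func exampleRunPodSandboxRequest() *RunPodSandboxRequest {
+	return &RunPodSandboxRequest{
+		Config: &PodSandboxConfig{
+			Metadata:     examplePodSandboxMetadata(),
+			Hostname:     "nginx",
+			LogDirectory: "/var/log/pods/default_nginx_0a1b2c3d/", // hypothetical path
+			Labels:       map[string]string{"app": "nginx"},
+			Linux: &LinuxPodSandboxConfig{
+				CgroupParent:    "/kubepods/besteffort",
+				SecurityContext: exampleLinuxSandboxSecurityContext(),
+			},
+		},
+		RuntimeHandler: "",
+	}
+}
+
+type RunPodSandboxResponse struct {
+	// ID of the PodSandbox to run.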
+ PodSandboxId string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RunPodSandboxResponse) Reset() { *m = RunPodSandboxResponse{} } +func (*RunPodSandboxResponse) ProtoMessage() {} +func (*RunPodSandboxResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{15} +} +func (m *RunPodSandboxResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunPodSandboxResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RunPodSandboxResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RunPodSandboxResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunPodSandboxResponse.Merge(m, src) +} +func (m *RunPodSandboxResponse) XXX_Size() int { + return m.Size() +} +func (m *RunPodSandboxResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RunPodSandboxResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RunPodSandboxResponse proto.InternalMessageInfo + +func (m *RunPodSandboxResponse) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +type StopPodSandboxRequest struct { + // ID of the PodSandbox to stop. + PodSandboxId string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopPodSandboxRequest) Reset() { *m = StopPodSandboxRequest{} } +func (*StopPodSandboxRequest) ProtoMessage() {} +func (*StopPodSandboxRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{16} +} +func (m *StopPodSandboxRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StopPodSandboxRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StopPodSandboxRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StopPodSandboxRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopPodSandboxRequest.Merge(m, src) +} +func (m *StopPodSandboxRequest) XXX_Size() int { + return m.Size() +} +func (m *StopPodSandboxRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopPodSandboxRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopPodSandboxRequest proto.InternalMessageInfo + +func (m *StopPodSandboxRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +type StopPodSandboxResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopPodSandboxResponse) Reset() { *m = StopPodSandboxResponse{} } +func (*StopPodSandboxResponse) ProtoMessage() {} +func (*StopPodSandboxResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{17} +} +func (m *StopPodSandboxResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StopPodSandboxResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StopPodSandboxResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + 
return b[:n], nil + } +} +func (m *StopPodSandboxResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopPodSandboxResponse.Merge(m, src) +} +func (m *StopPodSandboxResponse) XXX_Size() int { + return m.Size() +} +func (m *StopPodSandboxResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopPodSandboxResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopPodSandboxResponse proto.InternalMessageInfo + +type RemovePodSandboxRequest struct { + // ID of the PodSandbox to remove. + PodSandboxId string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemovePodSandboxRequest) Reset() { *m = RemovePodSandboxRequest{} } +func (*RemovePodSandboxRequest) ProtoMessage() {} +func (*RemovePodSandboxRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{18} +} +func (m *RemovePodSandboxRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemovePodSandboxRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemovePodSandboxRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemovePodSandboxRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemovePodSandboxRequest.Merge(m, src) +} +func (m *RemovePodSandboxRequest) XXX_Size() int { + return m.Size() +} +func (m *RemovePodSandboxRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemovePodSandboxRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemovePodSandboxRequest proto.InternalMessageInfo + +func (m *RemovePodSandboxRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +type RemovePodSandboxResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemovePodSandboxResponse) Reset() { *m = RemovePodSandboxResponse{} } +func (*RemovePodSandboxResponse) ProtoMessage() {} +func (*RemovePodSandboxResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{19} +} +func (m *RemovePodSandboxResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemovePodSandboxResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemovePodSandboxResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemovePodSandboxResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemovePodSandboxResponse.Merge(m, src) +} +func (m *RemovePodSandboxResponse) XXX_Size() int { + return m.Size() +} +func (m *RemovePodSandboxResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemovePodSandboxResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RemovePodSandboxResponse proto.InternalMessageInfo + +type PodSandboxStatusRequest struct { + // ID of the PodSandbox for which to retrieve status. + PodSandboxId string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + // Verbose indicates whether to return extra information about the pod sandbox. 
+ Verbose bool `protobuf:"varint,2,opt,name=verbose,proto3" json:"verbose,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxStatusRequest) Reset() { *m = PodSandboxStatusRequest{} } +func (*PodSandboxStatusRequest) ProtoMessage() {} +func (*PodSandboxStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{20} +} +func (m *PodSandboxStatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxStatusRequest.Merge(m, src) +} +func (m *PodSandboxStatusRequest) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxStatusRequest proto.InternalMessageInfo + +func (m *PodSandboxStatusRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *PodSandboxStatusRequest) GetVerbose() bool { + if m != nil { + return m.Verbose + } + return false +} + +// PodIP represents an ip of a Pod +type PodIP struct { + // an ip is a string representation of an IPv4 or an IPv6 + Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodIP) Reset() { *m = PodIP{} } +func (*PodIP) ProtoMessage() {} +func (*PodIP) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{21} +} +func (m *PodIP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodIP.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodIP.Merge(m, src) +} +func (m *PodIP) XXX_Size() int { + return m.Size() +} +func (m *PodIP) XXX_DiscardUnknown() { + xxx_messageInfo_PodIP.DiscardUnknown(m) +} + +var xxx_messageInfo_PodIP proto.InternalMessageInfo + +func (m *PodIP) GetIp() string { + if m != nil { + return m.Ip + } + return "" +} + +// PodSandboxNetworkStatus is the status of the network for a PodSandbox. +// Currently ignored for pods sharing the host networking namespace. +type PodSandboxNetworkStatus struct { + // IP address of the PodSandbox. 
+ Ip string `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` + // list of additional ips (not inclusive of PodSandboxNetworkStatus.Ip) of the PodSandBoxNetworkStatus + AdditionalIps []*PodIP `protobuf:"bytes,2,rep,name=additional_ips,json=additionalIps,proto3" json:"additional_ips,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxNetworkStatus) Reset() { *m = PodSandboxNetworkStatus{} } +func (*PodSandboxNetworkStatus) ProtoMessage() {} +func (*PodSandboxNetworkStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{22} +} +func (m *PodSandboxNetworkStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxNetworkStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxNetworkStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxNetworkStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxNetworkStatus.Merge(m, src) +} +func (m *PodSandboxNetworkStatus) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxNetworkStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxNetworkStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxNetworkStatus proto.InternalMessageInfo + +func (m *PodSandboxNetworkStatus) GetIp() string { + if m != nil { + return m.Ip + } + return "" +} + +func (m *PodSandboxNetworkStatus) GetAdditionalIps() []*PodIP { + if m != nil { + return m.AdditionalIps + } + return nil +} + +// Namespace contains paths to the namespaces. +type Namespace struct { + // Namespace options for Linux namespaces. + Options *NamespaceOption `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Namespace) Reset() { *m = Namespace{} } +func (*Namespace) ProtoMessage() {} +func (*Namespace) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{23} +} +func (m *Namespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Namespace.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Namespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_Namespace.Merge(m, src) +} +func (m *Namespace) XXX_Size() int { + return m.Size() +} +func (m *Namespace) XXX_DiscardUnknown() { + xxx_messageInfo_Namespace.DiscardUnknown(m) +} + +var xxx_messageInfo_Namespace proto.InternalMessageInfo + +func (m *Namespace) GetOptions() *NamespaceOption { + if m != nil { + return m.Options + } + return nil +} + +// LinuxSandboxStatus contains status specific to Linux sandboxes. +type LinuxPodSandboxStatus struct { + // Paths to the sandbox's namespaces. 
+ Namespaces *Namespace `protobuf:"bytes,1,opt,name=namespaces,proto3" json:"namespaces,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LinuxPodSandboxStatus) Reset() { *m = LinuxPodSandboxStatus{} } +func (*LinuxPodSandboxStatus) ProtoMessage() {} +func (*LinuxPodSandboxStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{24} +} +func (m *LinuxPodSandboxStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LinuxPodSandboxStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LinuxPodSandboxStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LinuxPodSandboxStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinuxPodSandboxStatus.Merge(m, src) +} +func (m *LinuxPodSandboxStatus) XXX_Size() int { + return m.Size() +} +func (m *LinuxPodSandboxStatus) XXX_DiscardUnknown() { + xxx_messageInfo_LinuxPodSandboxStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_LinuxPodSandboxStatus proto.InternalMessageInfo + +func (m *LinuxPodSandboxStatus) GetNamespaces() *Namespace { + if m != nil { + return m.Namespaces + } + return nil +} + +// PodSandboxStatus contains the status of the PodSandbox. +type PodSandboxStatus struct { + // ID of the sandbox. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Metadata of the sandbox. + Metadata *PodSandboxMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + // State of the sandbox. + State PodSandboxState `protobuf:"varint,3,opt,name=state,proto3,enum=runtime.v1.PodSandboxState" json:"state,omitempty"` + // Creation timestamp of the sandbox in nanoseconds. Must be > 0. + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Network contains network status if network is handled by the runtime. + Network *PodSandboxNetworkStatus `protobuf:"bytes,5,opt,name=network,proto3" json:"network,omitempty"` + // Linux-specific status to a pod sandbox. + Linux *LinuxPodSandboxStatus `protobuf:"bytes,6,opt,name=linux,proto3" json:"linux,omitempty"` + // Labels are key-value pairs that may be used to scope and select individual resources. + Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding PodSandboxConfig used to + // instantiate the pod sandbox this status represents. + Annotations map[string]string `protobuf:"bytes,8,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // runtime configuration used for this PodSandbox. 
+ RuntimeHandler string `protobuf:"bytes,9,opt,name=runtime_handler,json=runtimeHandler,proto3" json:"runtime_handler,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxStatus) Reset() { *m = PodSandboxStatus{} } +func (*PodSandboxStatus) ProtoMessage() {} +func (*PodSandboxStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{25} +} +func (m *PodSandboxStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxStatus.Merge(m, src) +} +func (m *PodSandboxStatus) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxStatus proto.InternalMessageInfo + +func (m *PodSandboxStatus) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *PodSandboxStatus) GetMetadata() *PodSandboxMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PodSandboxStatus) GetState() PodSandboxState { + if m != nil { + return m.State + } + return PodSandboxState_SANDBOX_READY +} + +func (m *PodSandboxStatus) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *PodSandboxStatus) GetNetwork() *PodSandboxNetworkStatus { + if m != nil { + return m.Network + } + return nil +} + +func (m *PodSandboxStatus) GetLinux() *LinuxPodSandboxStatus { + if m != nil { + return m.Linux + } + return nil +} + +func (m *PodSandboxStatus) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *PodSandboxStatus) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *PodSandboxStatus) GetRuntimeHandler() string { + if m != nil { + return m.RuntimeHandler + } + return "" +} + +type PodSandboxStatusResponse struct { + // Status of the PodSandbox. + Status *PodSandboxStatus `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // Info is extra information of the PodSandbox. The key could be arbitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. network namespace for linux container based container runtime. + // It should only be returned non-empty when Verbose is true. 
+ Info map[string]string `protobuf:"bytes,2,rep,name=info,proto3" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Container statuses + ContainersStatuses []*ContainerStatus `protobuf:"bytes,3,rep,name=containers_statuses,json=containersStatuses,proto3" json:"containers_statuses,omitempty"` + // Timestamp at which container and pod statuses were recorded + Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxStatusResponse) Reset() { *m = PodSandboxStatusResponse{} } +func (*PodSandboxStatusResponse) ProtoMessage() {} +func (*PodSandboxStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{26} +} +func (m *PodSandboxStatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxStatusResponse.Merge(m, src) +} +func (m *PodSandboxStatusResponse) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxStatusResponse proto.InternalMessageInfo + +func (m *PodSandboxStatusResponse) GetStatus() *PodSandboxStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *PodSandboxStatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + +func (m *PodSandboxStatusResponse) GetContainersStatuses() []*ContainerStatus { + if m != nil { + return m.ContainersStatuses + } + return nil +} + +func (m *PodSandboxStatusResponse) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// PodSandboxStateValue is the wrapper of PodSandboxState. +type PodSandboxStateValue struct { + // State of the sandbox. 
+ State PodSandboxState `protobuf:"varint,1,opt,name=state,proto3,enum=runtime.v1.PodSandboxState" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxStateValue) Reset() { *m = PodSandboxStateValue{} } +func (*PodSandboxStateValue) ProtoMessage() {} +func (*PodSandboxStateValue) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{27} +} +func (m *PodSandboxStateValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxStateValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxStateValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxStateValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxStateValue.Merge(m, src) +} +func (m *PodSandboxStateValue) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxStateValue) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxStateValue.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxStateValue proto.InternalMessageInfo + +func (m *PodSandboxStateValue) GetState() PodSandboxState { + if m != nil { + return m.State + } + return PodSandboxState_SANDBOX_READY +} + +// PodSandboxFilter is used to filter a list of PodSandboxes. +// All those fields are combined with 'AND' +type PodSandboxFilter struct { + // ID of the sandbox. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // State of the sandbox. + State *PodSandboxStateValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + // LabelSelector to select matches. + // Only api.MatchLabels is supported for now and the requirements + // are ANDed. MatchExpressions is not supported yet. 
+ LabelSelector map[string]string `protobuf:"bytes,3,rep,name=label_selector,json=labelSelector,proto3" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxFilter) Reset() { *m = PodSandboxFilter{} } +func (*PodSandboxFilter) ProtoMessage() {} +func (*PodSandboxFilter) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{28} +} +func (m *PodSandboxFilter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxFilter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxFilter) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxFilter.Merge(m, src) +} +func (m *PodSandboxFilter) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxFilter) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxFilter.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxFilter proto.InternalMessageInfo + +func (m *PodSandboxFilter) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *PodSandboxFilter) GetState() *PodSandboxStateValue { + if m != nil { + return m.State + } + return nil +} + +func (m *PodSandboxFilter) GetLabelSelector() map[string]string { + if m != nil { + return m.LabelSelector + } + return nil +} + +type ListPodSandboxRequest struct { + // PodSandboxFilter to filter a list of PodSandboxes. + Filter *PodSandboxFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListPodSandboxRequest) Reset() { *m = ListPodSandboxRequest{} } +func (*ListPodSandboxRequest) ProtoMessage() {} +func (*ListPodSandboxRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{29} +} +func (m *ListPodSandboxRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListPodSandboxRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListPodSandboxRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListPodSandboxRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListPodSandboxRequest.Merge(m, src) +} +func (m *ListPodSandboxRequest) XXX_Size() int { + return m.Size() +} +func (m *ListPodSandboxRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListPodSandboxRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListPodSandboxRequest proto.InternalMessageInfo + +func (m *ListPodSandboxRequest) GetFilter() *PodSandboxFilter { + if m != nil { + return m.Filter + } + return nil +} + +// PodSandbox contains minimal information about a sandbox. +type PodSandbox struct { + // ID of the PodSandbox. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Metadata of the PodSandbox. + Metadata *PodSandboxMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + // State of the PodSandbox. 
+ State PodSandboxState `protobuf:"varint,3,opt,name=state,proto3,enum=runtime.v1.PodSandboxState" json:"state,omitempty"` + // Creation timestamps of the PodSandbox in nanoseconds. Must be > 0. + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Labels of the PodSandbox. + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding PodSandboxConfig used to + // instantiate this PodSandbox. + Annotations map[string]string `protobuf:"bytes,6,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // runtime configuration used for this PodSandbox. + RuntimeHandler string `protobuf:"bytes,7,opt,name=runtime_handler,json=runtimeHandler,proto3" json:"runtime_handler,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandbox) Reset() { *m = PodSandbox{} } +func (*PodSandbox) ProtoMessage() {} +func (*PodSandbox) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{30} +} +func (m *PodSandbox) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandbox) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandbox.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandbox) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandbox.Merge(m, src) +} +func (m *PodSandbox) XXX_Size() int { + return m.Size() +} +func (m *PodSandbox) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandbox.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandbox proto.InternalMessageInfo + +func (m *PodSandbox) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *PodSandbox) GetMetadata() *PodSandboxMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PodSandbox) GetState() PodSandboxState { + if m != nil { + return m.State + } + return PodSandboxState_SANDBOX_READY +} + +func (m *PodSandbox) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *PodSandbox) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *PodSandbox) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *PodSandbox) GetRuntimeHandler() string { + if m != nil { + return m.RuntimeHandler + } + return "" +} + +type ListPodSandboxResponse struct { + // List of PodSandboxes. 
+ Items []*PodSandbox `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListPodSandboxResponse) Reset() { *m = ListPodSandboxResponse{} } +func (*ListPodSandboxResponse) ProtoMessage() {} +func (*ListPodSandboxResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{31} +} +func (m *ListPodSandboxResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListPodSandboxResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListPodSandboxResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListPodSandboxResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListPodSandboxResponse.Merge(m, src) +} +func (m *ListPodSandboxResponse) XXX_Size() int { + return m.Size() +} +func (m *ListPodSandboxResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListPodSandboxResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListPodSandboxResponse proto.InternalMessageInfo + +func (m *ListPodSandboxResponse) GetItems() []*PodSandbox { + if m != nil { + return m.Items + } + return nil +} + +type PodSandboxStatsRequest struct { + // ID of the pod sandbox for which to retrieve stats. + PodSandboxId string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxStatsRequest) Reset() { *m = PodSandboxStatsRequest{} } +func (*PodSandboxStatsRequest) ProtoMessage() {} +func (*PodSandboxStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{32} +} +func (m *PodSandboxStatsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxStatsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxStatsRequest.Merge(m, src) +} +func (m *PodSandboxStatsRequest) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxStatsRequest proto.InternalMessageInfo + +func (m *PodSandboxStatsRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +type PodSandboxStatsResponse struct { + Stats *PodSandboxStats `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxStatsResponse) Reset() { *m = PodSandboxStatsResponse{} } +func (*PodSandboxStatsResponse) ProtoMessage() {} +func (*PodSandboxStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{33} +} +func (m *PodSandboxStatsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxStatsResponse.Marshal(b, m, deterministic) + 
} else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxStatsResponse.Merge(m, src) +} +func (m *PodSandboxStatsResponse) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxStatsResponse proto.InternalMessageInfo + +func (m *PodSandboxStatsResponse) GetStats() *PodSandboxStats { + if m != nil { + return m.Stats + } + return nil +} + +// PodSandboxStatsFilter is used to filter the list of pod sandboxes to retrieve stats for. +// All those fields are combined with 'AND'. +type PodSandboxStatsFilter struct { + // ID of the pod sandbox. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // LabelSelector to select matches. + // Only api.MatchLabels is supported for now and the requirements + // are ANDed. MatchExpressions is not supported yet. + LabelSelector map[string]string `protobuf:"bytes,2,rep,name=label_selector,json=labelSelector,proto3" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxStatsFilter) Reset() { *m = PodSandboxStatsFilter{} } +func (*PodSandboxStatsFilter) ProtoMessage() {} +func (*PodSandboxStatsFilter) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{34} +} +func (m *PodSandboxStatsFilter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxStatsFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxStatsFilter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxStatsFilter) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxStatsFilter.Merge(m, src) +} +func (m *PodSandboxStatsFilter) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxStatsFilter) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxStatsFilter.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxStatsFilter proto.InternalMessageInfo + +func (m *PodSandboxStatsFilter) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *PodSandboxStatsFilter) GetLabelSelector() map[string]string { + if m != nil { + return m.LabelSelector + } + return nil +} + +type ListPodSandboxStatsRequest struct { + // Filter for the list request. 
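+	// For example (hypothetical values): a filter with Id "abc123" and
+	// LabelSelector {"app": "nginx"} matches only the sandbox with that
+	// exact id whose labels also include app=nginx, since all
+	// PodSandboxStatsFilter fields are combined with 'AND'.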
+ Filter *PodSandboxStatsFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListPodSandboxStatsRequest) Reset() { *m = ListPodSandboxStatsRequest{} } +func (*ListPodSandboxStatsRequest) ProtoMessage() {} +func (*ListPodSandboxStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{35} +} +func (m *ListPodSandboxStatsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListPodSandboxStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListPodSandboxStatsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListPodSandboxStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListPodSandboxStatsRequest.Merge(m, src) +} +func (m *ListPodSandboxStatsRequest) XXX_Size() int { + return m.Size() +} +func (m *ListPodSandboxStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListPodSandboxStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListPodSandboxStatsRequest proto.InternalMessageInfo + +func (m *ListPodSandboxStatsRequest) GetFilter() *PodSandboxStatsFilter { + if m != nil { + return m.Filter + } + return nil +} + +type ListPodSandboxStatsResponse struct { + // Stats of the pod sandbox. + Stats []*PodSandboxStats `protobuf:"bytes,1,rep,name=stats,proto3" json:"stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListPodSandboxStatsResponse) Reset() { *m = ListPodSandboxStatsResponse{} } +func (*ListPodSandboxStatsResponse) ProtoMessage() {} +func (*ListPodSandboxStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{36} +} +func (m *ListPodSandboxStatsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListPodSandboxStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListPodSandboxStatsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListPodSandboxStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListPodSandboxStatsResponse.Merge(m, src) +} +func (m *ListPodSandboxStatsResponse) XXX_Size() int { + return m.Size() +} +func (m *ListPodSandboxStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListPodSandboxStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListPodSandboxStatsResponse proto.InternalMessageInfo + +func (m *ListPodSandboxStatsResponse) GetStats() []*PodSandboxStats { + if m != nil { + return m.Stats + } + return nil +} + +// PodSandboxAttributes provides basic information of the pod sandbox. +type PodSandboxAttributes struct { + // ID of the pod sandbox. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Metadata of the pod sandbox. + Metadata *PodSandboxMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Key-value pairs that may be used to scope and select individual resources. 
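+	// For example (hypothetical values): {"app": "nginx", "tier": "frontend"}.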
+ Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding PodSandboxStatus used to + // instantiate the PodSandbox this status represents. + Annotations map[string]string `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxAttributes) Reset() { *m = PodSandboxAttributes{} } +func (*PodSandboxAttributes) ProtoMessage() {} +func (*PodSandboxAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{37} +} +func (m *PodSandboxAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxAttributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxAttributes.Merge(m, src) +} +func (m *PodSandboxAttributes) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxAttributes proto.InternalMessageInfo + +func (m *PodSandboxAttributes) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *PodSandboxAttributes) GetMetadata() *PodSandboxMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *PodSandboxAttributes) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *PodSandboxAttributes) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +// PodSandboxStats provides the resource usage statistics for a pod. +// The linux or windows field will be populated depending on the platform. +type PodSandboxStats struct { + // Information of the pod. + Attributes *PodSandboxAttributes `protobuf:"bytes,1,opt,name=attributes,proto3" json:"attributes,omitempty"` + // Stats from linux. + Linux *LinuxPodSandboxStats `protobuf:"bytes,2,opt,name=linux,proto3" json:"linux,omitempty"` + // Stats from windows. 
+ Windows *WindowsPodSandboxStats `protobuf:"bytes,3,opt,name=windows,proto3" json:"windows,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxStats) Reset() { *m = PodSandboxStats{} } +func (*PodSandboxStats) ProtoMessage() {} +func (*PodSandboxStats) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{38} +} +func (m *PodSandboxStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxStats.Merge(m, src) +} +func (m *PodSandboxStats) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxStats) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxStats.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxStats proto.InternalMessageInfo + +func (m *PodSandboxStats) GetAttributes() *PodSandboxAttributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *PodSandboxStats) GetLinux() *LinuxPodSandboxStats { + if m != nil { + return m.Linux + } + return nil +} + +func (m *PodSandboxStats) GetWindows() *WindowsPodSandboxStats { + if m != nil { + return m.Windows + } + return nil +} + +// LinuxPodSandboxStats provides the resource usage statistics for a pod sandbox on linux. +type LinuxPodSandboxStats struct { + // CPU usage gathered for the pod sandbox. + Cpu *CpuUsage `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` + // Memory usage gathered for the pod sandbox. + Memory *MemoryUsage `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` + // Network usage gathered for the pod sandbox + Network *NetworkUsage `protobuf:"bytes,3,opt,name=network,proto3" json:"network,omitempty"` + // Stats pertaining to processes in the pod sandbox. + Process *ProcessUsage `protobuf:"bytes,4,opt,name=process,proto3" json:"process,omitempty"` + // Stats of containers in the measured pod sandbox. 
+ Containers []*ContainerStats `protobuf:"bytes,5,rep,name=containers,proto3" json:"containers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LinuxPodSandboxStats) Reset() { *m = LinuxPodSandboxStats{} } +func (*LinuxPodSandboxStats) ProtoMessage() {} +func (*LinuxPodSandboxStats) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{39} +} +func (m *LinuxPodSandboxStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LinuxPodSandboxStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LinuxPodSandboxStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LinuxPodSandboxStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinuxPodSandboxStats.Merge(m, src) +} +func (m *LinuxPodSandboxStats) XXX_Size() int { + return m.Size() +} +func (m *LinuxPodSandboxStats) XXX_DiscardUnknown() { + xxx_messageInfo_LinuxPodSandboxStats.DiscardUnknown(m) +} + +var xxx_messageInfo_LinuxPodSandboxStats proto.InternalMessageInfo + +func (m *LinuxPodSandboxStats) GetCpu() *CpuUsage { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *LinuxPodSandboxStats) GetMemory() *MemoryUsage { + if m != nil { + return m.Memory + } + return nil +} + +func (m *LinuxPodSandboxStats) GetNetwork() *NetworkUsage { + if m != nil { + return m.Network + } + return nil +} + +func (m *LinuxPodSandboxStats) GetProcess() *ProcessUsage { + if m != nil { + return m.Process + } + return nil +} + +func (m *LinuxPodSandboxStats) GetContainers() []*ContainerStats { + if m != nil { + return m.Containers + } + return nil +} + +// WindowsPodSandboxStats provides the resource usage statistics for a pod sandbox on windows +type WindowsPodSandboxStats struct { + // CPU usage gathered for the pod sandbox. + Cpu *WindowsCpuUsage `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"` + // Memory usage gathered for the pod sandbox. + Memory *WindowsMemoryUsage `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"` + // Network usage gathered for the pod sandbox + Network *WindowsNetworkUsage `protobuf:"bytes,3,opt,name=network,proto3" json:"network,omitempty"` + // Stats pertaining to processes in the pod sandbox. + Process *WindowsProcessUsage `protobuf:"bytes,4,opt,name=process,proto3" json:"process,omitempty"` + // Stats of containers in the measured pod sandbox. 
+	Containers []*WindowsContainerStats `protobuf:"bytes,5,rep,name=containers,proto3" json:"containers,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *WindowsPodSandboxStats) Reset() { *m = WindowsPodSandboxStats{} }
+func (*WindowsPodSandboxStats) ProtoMessage() {}
+func (*WindowsPodSandboxStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{40}
+}
+func (m *WindowsPodSandboxStats) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WindowsPodSandboxStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WindowsPodSandboxStats.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WindowsPodSandboxStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WindowsPodSandboxStats.Merge(m, src)
+}
+func (m *WindowsPodSandboxStats) XXX_Size() int {
+	return m.Size()
+}
+func (m *WindowsPodSandboxStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_WindowsPodSandboxStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WindowsPodSandboxStats proto.InternalMessageInfo
+
+func (m *WindowsPodSandboxStats) GetCpu() *WindowsCpuUsage {
+	if m != nil {
+		return m.Cpu
+	}
+	return nil
+}
+
+func (m *WindowsPodSandboxStats) GetMemory() *WindowsMemoryUsage {
+	if m != nil {
+		return m.Memory
+	}
+	return nil
+}
+
+func (m *WindowsPodSandboxStats) GetNetwork() *WindowsNetworkUsage {
+	if m != nil {
+		return m.Network
+	}
+	return nil
+}
+
+func (m *WindowsPodSandboxStats) GetProcess() *WindowsProcessUsage {
+	if m != nil {
+		return m.Process
+	}
+	return nil
+}
+
+func (m *WindowsPodSandboxStats) GetContainers() []*WindowsContainerStats {
+	if m != nil {
+		return m.Containers
+	}
+	return nil
+}
+
+// NetworkUsage contains data about network resources.
+type NetworkUsage struct {
+	// Timestamp in nanoseconds at which the information was collected. Must be > 0.
+	Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	// Stats for the default network interface.
+	DefaultInterface *NetworkInterfaceUsage `protobuf:"bytes,2,opt,name=default_interface,json=defaultInterface,proto3" json:"default_interface,omitempty"`
+	// Stats for all found network interfaces, excluding the default.
+	Interfaces []*NetworkInterfaceUsage `protobuf:"bytes,3,rep,name=interfaces,proto3" json:"interfaces,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NetworkUsage) Reset() { *m = NetworkUsage{} }
+func (*NetworkUsage) ProtoMessage() {}
+func (*NetworkUsage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{41}
+}
+func (m *NetworkUsage) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NetworkUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_NetworkUsage.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *NetworkUsage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NetworkUsage.Merge(m, src)
+}
+func (m *NetworkUsage) XXX_Size() int {
+	return m.Size()
+}
+func (m *NetworkUsage) XXX_DiscardUnknown() {
+	xxx_messageInfo_NetworkUsage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkUsage proto.InternalMessageInfo
+
+func (m *NetworkUsage) GetTimestamp() int64 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+func (m *NetworkUsage) GetDefaultInterface() *NetworkInterfaceUsage {
+	if m != nil {
+		return m.DefaultInterface
+	}
+	return nil
+}
+
+func (m *NetworkUsage) GetInterfaces() []*NetworkInterfaceUsage {
+	if m != nil {
+		return m.Interfaces
+	}
+	return nil
+}
+
+// WindowsNetworkUsage contains data about network resources specific to Windows.
+type WindowsNetworkUsage struct {
+	// Timestamp in nanoseconds at which the information was collected. Must be > 0.
+	Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	// Stats for the default network interface.
+	DefaultInterface *WindowsNetworkInterfaceUsage `protobuf:"bytes,2,opt,name=default_interface,json=defaultInterface,proto3" json:"default_interface,omitempty"`
+	// Stats for all found network interfaces, excluding the default.
+ Interfaces []*WindowsNetworkInterfaceUsage `protobuf:"bytes,3,rep,name=interfaces,proto3" json:"interfaces,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsNetworkUsage) Reset() { *m = WindowsNetworkUsage{} } +func (*WindowsNetworkUsage) ProtoMessage() {} +func (*WindowsNetworkUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{42} +} +func (m *WindowsNetworkUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsNetworkUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WindowsNetworkUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WindowsNetworkUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsNetworkUsage.Merge(m, src) +} +func (m *WindowsNetworkUsage) XXX_Size() int { + return m.Size() +} +func (m *WindowsNetworkUsage) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsNetworkUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsNetworkUsage proto.InternalMessageInfo + +func (m *WindowsNetworkUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *WindowsNetworkUsage) GetDefaultInterface() *WindowsNetworkInterfaceUsage { + if m != nil { + return m.DefaultInterface + } + return nil +} + +func (m *WindowsNetworkUsage) GetInterfaces() []*WindowsNetworkInterfaceUsage { + if m != nil { + return m.Interfaces + } + return nil +} + +// NetworkInterfaceUsage contains resource value data about a network interface. +type NetworkInterfaceUsage struct { + // The name of the network interface. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Cumulative count of bytes received. + RxBytes *UInt64Value `protobuf:"bytes,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` + // Cumulative count of receive errors encountered. + RxErrors *UInt64Value `protobuf:"bytes,3,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` + // Cumulative count of bytes transmitted. + TxBytes *UInt64Value `protobuf:"bytes,4,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` + // Cumulative count of transmit errors encountered. 
+	TxErrors *UInt64Value `protobuf:"bytes,5,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NetworkInterfaceUsage) Reset() { *m = NetworkInterfaceUsage{} }
+func (*NetworkInterfaceUsage) ProtoMessage() {}
+func (*NetworkInterfaceUsage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{43}
+}
+func (m *NetworkInterfaceUsage) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *NetworkInterfaceUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_NetworkInterfaceUsage.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *NetworkInterfaceUsage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NetworkInterfaceUsage.Merge(m, src)
+}
+func (m *NetworkInterfaceUsage) XXX_Size() int {
+	return m.Size()
+}
+func (m *NetworkInterfaceUsage) XXX_DiscardUnknown() {
+	xxx_messageInfo_NetworkInterfaceUsage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NetworkInterfaceUsage proto.InternalMessageInfo
+
+func (m *NetworkInterfaceUsage) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *NetworkInterfaceUsage) GetRxBytes() *UInt64Value {
+	if m != nil {
+		return m.RxBytes
+	}
+	return nil
+}
+
+func (m *NetworkInterfaceUsage) GetRxErrors() *UInt64Value {
+	if m != nil {
+		return m.RxErrors
+	}
+	return nil
+}
+
+func (m *NetworkInterfaceUsage) GetTxBytes() *UInt64Value {
+	if m != nil {
+		return m.TxBytes
+	}
+	return nil
+}
+
+func (m *NetworkInterfaceUsage) GetTxErrors() *UInt64Value {
+	if m != nil {
+		return m.TxErrors
+	}
+	return nil
+}
+
+// WindowsNetworkInterfaceUsage contains resource value data about a network interface specific for Windows.
+type WindowsNetworkInterfaceUsage struct {
+	// The name of the network interface.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Cumulative count of bytes received.
+	RxBytes *UInt64Value `protobuf:"bytes,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
+	// Cumulative count of packets dropped while receiving.
+	RxPacketsDropped *UInt64Value `protobuf:"bytes,3,opt,name=rx_packets_dropped,json=rxPacketsDropped,proto3" json:"rx_packets_dropped,omitempty"`
+	// Cumulative count of bytes transmitted.
+	TxBytes *UInt64Value `protobuf:"bytes,4,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	// Cumulative count of packets dropped while transmitting.
+	TxPacketsDropped *UInt64Value `protobuf:"bytes,5,opt,name=tx_packets_dropped,json=txPacketsDropped,proto3" json:"tx_packets_dropped,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *WindowsNetworkInterfaceUsage) Reset() { *m = WindowsNetworkInterfaceUsage{} }
+func (*WindowsNetworkInterfaceUsage) ProtoMessage() {}
+func (*WindowsNetworkInterfaceUsage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{44}
+}
+func (m *WindowsNetworkInterfaceUsage) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WindowsNetworkInterfaceUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WindowsNetworkInterfaceUsage.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WindowsNetworkInterfaceUsage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WindowsNetworkInterfaceUsage.Merge(m, src)
+}
+func (m *WindowsNetworkInterfaceUsage) XXX_Size() int {
+	return m.Size()
+}
+func (m *WindowsNetworkInterfaceUsage) XXX_DiscardUnknown() {
+	xxx_messageInfo_WindowsNetworkInterfaceUsage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WindowsNetworkInterfaceUsage proto.InternalMessageInfo
+
+func (m *WindowsNetworkInterfaceUsage) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *WindowsNetworkInterfaceUsage) GetRxBytes() *UInt64Value {
+	if m != nil {
+		return m.RxBytes
+	}
+	return nil
+}
+
+func (m *WindowsNetworkInterfaceUsage) GetRxPacketsDropped() *UInt64Value {
+	if m != nil {
+		return m.RxPacketsDropped
+	}
+	return nil
+}
+
+func (m *WindowsNetworkInterfaceUsage) GetTxBytes() *UInt64Value {
+	if m != nil {
+		return m.TxBytes
+	}
+	return nil
+}
+
+func (m *WindowsNetworkInterfaceUsage) GetTxPacketsDropped() *UInt64Value {
+	if m != nil {
+		return m.TxPacketsDropped
+	}
+	return nil
+}
+
+// ProcessUsage are stats pertaining to processes.
+type ProcessUsage struct {
+	// Timestamp in nanoseconds at which the information was collected. Must be > 0.
+	Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	// Number of processes.
+	ProcessCount *UInt64Value `protobuf:"bytes,2,opt,name=process_count,json=processCount,proto3" json:"process_count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ProcessUsage) Reset() { *m = ProcessUsage{} }
+func (*ProcessUsage) ProtoMessage() {}
+func (*ProcessUsage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{45}
+}
+func (m *ProcessUsage) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ProcessUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ProcessUsage.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ProcessUsage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ProcessUsage.Merge(m, src)
+}
+func (m *ProcessUsage) XXX_Size() int {
+	return m.Size()
+}
+func (m *ProcessUsage) XXX_DiscardUnknown() {
+	xxx_messageInfo_ProcessUsage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProcessUsage proto.InternalMessageInfo
+
+func (m *ProcessUsage) GetTimestamp() int64 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+func (m *ProcessUsage) GetProcessCount() *UInt64Value {
+	if m != nil {
+		return m.ProcessCount
+	}
+	return nil
+}
+
+// WindowsProcessUsage are stats pertaining to processes specific to Windows.
+type WindowsProcessUsage struct {
+	// Timestamp in nanoseconds at which the information was collected. Must be > 0.
+	Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	// Number of processes.
+	ProcessCount *UInt64Value `protobuf:"bytes,2,opt,name=process_count,json=processCount,proto3" json:"process_count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *WindowsProcessUsage) Reset() { *m = WindowsProcessUsage{} }
+func (*WindowsProcessUsage) ProtoMessage() {}
+func (*WindowsProcessUsage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{46}
+}
+func (m *WindowsProcessUsage) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WindowsProcessUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WindowsProcessUsage.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WindowsProcessUsage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WindowsProcessUsage.Merge(m, src)
+}
+func (m *WindowsProcessUsage) XXX_Size() int {
+	return m.Size()
+}
+func (m *WindowsProcessUsage) XXX_DiscardUnknown() {
+	xxx_messageInfo_WindowsProcessUsage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WindowsProcessUsage proto.InternalMessageInfo
+
+func (m *WindowsProcessUsage) GetTimestamp() int64 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+func (m *WindowsProcessUsage) GetProcessCount() *UInt64Value {
+	if m != nil {
+		return m.ProcessCount
+	}
+	return nil
+}
+
+// ImageSpec is an internal representation of an image.
+type ImageSpec struct {
+	// Container's Image field (e.g. imageID or imageDigest).
+	Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"`
+	// Unstructured key-value map holding arbitrary metadata.
+	// ImageSpec Annotations can be used to help the runtime target specific
+	// images in multi-arch images.
+ Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The container image reference specified by the user (e.g. image[:tag] or digest). + // Only set if available within the RPC context. + UserSpecifiedImage string `protobuf:"bytes,18,opt,name=user_specified_image,json=userSpecifiedImage,proto3" json:"user_specified_image,omitempty"` + // Runtime handler to use for pulling the image. + // If the runtime handler is unknown, the request should be rejected. + // An empty string would select the default runtime handler. + RuntimeHandler string `protobuf:"bytes,19,opt,name=runtime_handler,json=runtimeHandler,proto3" json:"runtime_handler,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImageSpec) Reset() { *m = ImageSpec{} } +func (*ImageSpec) ProtoMessage() {} +func (*ImageSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{47} +} +func (m *ImageSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ImageSpec.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ImageSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageSpec.Merge(m, src) +} +func (m *ImageSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageSpec proto.InternalMessageInfo + +func (m *ImageSpec) GetImage() string { + if m != nil { + return m.Image + } + return "" +} + +func (m *ImageSpec) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *ImageSpec) GetUserSpecifiedImage() string { + if m != nil { + return m.UserSpecifiedImage + } + return "" +} + +func (m *ImageSpec) GetRuntimeHandler() string { + if m != nil { + return m.RuntimeHandler + } + return "" +} + +type KeyValue struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KeyValue) Reset() { *m = KeyValue{} } +func (*KeyValue) ProtoMessage() {} +func (*KeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{48} +} +func (m *KeyValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *KeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyValue.Merge(m, src) +} +func (m *KeyValue) XXX_Size() int { + return m.Size() +} +func (m *KeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_KeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyValue proto.InternalMessageInfo + +func (m *KeyValue) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *KeyValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// LinuxContainerResources 
specifies Linux specific configuration for +// resources. +type LinuxContainerResources struct { + // CPU CFS (Completely Fair Scheduler) period. Default: 0 (not specified). + CpuPeriod int64 `protobuf:"varint,1,opt,name=cpu_period,json=cpuPeriod,proto3" json:"cpu_period,omitempty"` + // CPU CFS (Completely Fair Scheduler) quota. Default: 0 (not specified). + CpuQuota int64 `protobuf:"varint,2,opt,name=cpu_quota,json=cpuQuota,proto3" json:"cpu_quota,omitempty"` + // CPU shares (relative weight vs. other containers). Default: 0 (not specified). + CpuShares int64 `protobuf:"varint,3,opt,name=cpu_shares,json=cpuShares,proto3" json:"cpu_shares,omitempty"` + // Memory limit in bytes. Default: 0 (not specified). + MemoryLimitInBytes int64 `protobuf:"varint,4,opt,name=memory_limit_in_bytes,json=memoryLimitInBytes,proto3" json:"memory_limit_in_bytes,omitempty"` + // OOMScoreAdj adjusts the oom-killer score. Default: 0 (not specified). + OomScoreAdj int64 `protobuf:"varint,5,opt,name=oom_score_adj,json=oomScoreAdj,proto3" json:"oom_score_adj,omitempty"` + // CpusetCpus constrains the allowed set of logical CPUs. Default: "" (not specified). + CpusetCpus string `protobuf:"bytes,6,opt,name=cpuset_cpus,json=cpusetCpus,proto3" json:"cpuset_cpus,omitempty"` + // CpusetMems constrains the allowed set of memory nodes. Default: "" (not specified). + CpusetMems string `protobuf:"bytes,7,opt,name=cpuset_mems,json=cpusetMems,proto3" json:"cpuset_mems,omitempty"` + // List of HugepageLimits to limit the HugeTLB usage of container per page size. Default: nil (not specified). + HugepageLimits []*HugepageLimit `protobuf:"bytes,8,rep,name=hugepage_limits,json=hugepageLimits,proto3" json:"hugepage_limits,omitempty"` + // Unified resources for cgroup v2. Default: nil (not specified). + // Each key/value in the map refers to the cgroup v2. + // e.g. "memory.max": "6937202688" or "io.weight": "default 100". + Unified map[string]string `protobuf:"bytes,9,rep,name=unified,proto3" json:"unified,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Memory swap limit in bytes. Default 0 (not specified). 
+	MemorySwapLimitInBytes int64 `protobuf:"varint,10,opt,name=memory_swap_limit_in_bytes,json=memorySwapLimitInBytes,proto3" json:"memory_swap_limit_in_bytes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LinuxContainerResources) Reset() { *m = LinuxContainerResources{} }
+func (*LinuxContainerResources) ProtoMessage() {}
+func (*LinuxContainerResources) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{49}
+}
+func (m *LinuxContainerResources) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LinuxContainerResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LinuxContainerResources.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LinuxContainerResources) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LinuxContainerResources.Merge(m, src)
+}
+func (m *LinuxContainerResources) XXX_Size() int {
+	return m.Size()
+}
+func (m *LinuxContainerResources) XXX_DiscardUnknown() {
+	xxx_messageInfo_LinuxContainerResources.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LinuxContainerResources proto.InternalMessageInfo
+
+func (m *LinuxContainerResources) GetCpuPeriod() int64 {
+	if m != nil {
+		return m.CpuPeriod
+	}
+	return 0
+}
+
+func (m *LinuxContainerResources) GetCpuQuota() int64 {
+	if m != nil {
+		return m.CpuQuota
+	}
+	return 0
+}
+
+func (m *LinuxContainerResources) GetCpuShares() int64 {
+	if m != nil {
+		return m.CpuShares
+	}
+	return 0
+}
+
+func (m *LinuxContainerResources) GetMemoryLimitInBytes() int64 {
+	if m != nil {
+		return m.MemoryLimitInBytes
+	}
+	return 0
+}
+
+func (m *LinuxContainerResources) GetOomScoreAdj() int64 {
+	if m != nil {
+		return m.OomScoreAdj
+	}
+	return 0
+}
+
+func (m *LinuxContainerResources) GetCpusetCpus() string {
+	if m != nil {
+		return m.CpusetCpus
+	}
+	return ""
+}
+
+func (m *LinuxContainerResources) GetCpusetMems() string {
+	if m != nil {
+		return m.CpusetMems
+	}
+	return ""
+}
+
+func (m *LinuxContainerResources) GetHugepageLimits() []*HugepageLimit {
+	if m != nil {
+		return m.HugepageLimits
+	}
+	return nil
+}
+
+func (m *LinuxContainerResources) GetUnified() map[string]string {
+	if m != nil {
+		return m.Unified
+	}
+	return nil
+}
+
+func (m *LinuxContainerResources) GetMemorySwapLimitInBytes() int64 {
+	if m != nil {
+		return m.MemorySwapLimitInBytes
+	}
+	return 0
+}
+
+// HugepageLimit corresponds to the file `hugetlb.<hugepagesize>.limit_in_byte` in container level cgroup.
+// For example, `PageSize=1GB`, `Limit=1073741824` means setting `1073741824` bytes to hugetlb.1GB.limit_in_bytes.
+type HugepageLimit struct {
+	// The value of PageSize has the format <size><unit-prefix>B (2MB, 1GB),
+	// and must match the <hugepagesize> of the corresponding control file found in `hugetlb.<hugepagesize>.limit_in_bytes`.
+	// The values of <unit-prefix> are intended to be parsed using base 1024 ("1KB" = 1024, "1MB" = 1048576, etc).
+	PageSize string `protobuf:"bytes,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+	// limit in bytes of hugepagesize HugeTLB usage.
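+	// For example (hypothetical values): PageSize=2MB with Limit=4194304
+	// allows at most two 2MB hugetlb pages, mirroring the 1GB example above.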
+ Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HugepageLimit) Reset() { *m = HugepageLimit{} } +func (*HugepageLimit) ProtoMessage() {} +func (*HugepageLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{50} +} +func (m *HugepageLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HugepageLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HugepageLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HugepageLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_HugepageLimit.Merge(m, src) +} +func (m *HugepageLimit) XXX_Size() int { + return m.Size() +} +func (m *HugepageLimit) XXX_DiscardUnknown() { + xxx_messageInfo_HugepageLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_HugepageLimit proto.InternalMessageInfo + +func (m *HugepageLimit) GetPageSize() string { + if m != nil { + return m.PageSize + } + return "" +} + +func (m *HugepageLimit) GetLimit() uint64 { + if m != nil { + return m.Limit + } + return 0 +} + +// SELinuxOption are the labels to be applied to the container. +type SELinuxOption struct { + User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + Level string `protobuf:"bytes,4,opt,name=level,proto3" json:"level,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SELinuxOption) Reset() { *m = SELinuxOption{} } +func (*SELinuxOption) ProtoMessage() {} +func (*SELinuxOption) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{51} +} +func (m *SELinuxOption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SELinuxOption.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SELinuxOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxOption.Merge(m, src) +} +func (m *SELinuxOption) XXX_Size() int { + return m.Size() +} +func (m *SELinuxOption) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxOption.DiscardUnknown(m) +} + +var xxx_messageInfo_SELinuxOption proto.InternalMessageInfo + +func (m *SELinuxOption) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *SELinuxOption) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *SELinuxOption) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *SELinuxOption) GetLevel() string { + if m != nil { + return m.Level + } + return "" +} + +// Capability contains the container capabilities to add or drop +// Dropping a capability will drop it from all sets. +// If a capability is added to only the add_capabilities list then it gets added to permitted, +// inheritable, effective and bounding sets, i.e. all sets except the ambient set. 
+// If a capability is added to only the add_ambient_capabilities list then it gets added to all sets, i.e permitted +// inheritable, effective, bounding and ambient sets. +// If a capability is added to add_capabilities and add_ambient_capabilities lists then it gets added to all sets, i.e. +// permitted, inheritable, effective, bounding and ambient sets. +type Capability struct { + // List of capabilities to add. + AddCapabilities []string `protobuf:"bytes,1,rep,name=add_capabilities,json=addCapabilities,proto3" json:"add_capabilities,omitempty"` + // List of capabilities to drop. + DropCapabilities []string `protobuf:"bytes,2,rep,name=drop_capabilities,json=dropCapabilities,proto3" json:"drop_capabilities,omitempty"` + // List of ambient capabilities to add. + AddAmbientCapabilities []string `protobuf:"bytes,3,rep,name=add_ambient_capabilities,json=addAmbientCapabilities,proto3" json:"add_ambient_capabilities,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Capability) Reset() { *m = Capability{} } +func (*Capability) ProtoMessage() {} +func (*Capability) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{52} +} +func (m *Capability) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Capability) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Capability.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Capability) XXX_Merge(src proto.Message) { + xxx_messageInfo_Capability.Merge(m, src) +} +func (m *Capability) XXX_Size() int { + return m.Size() +} +func (m *Capability) XXX_DiscardUnknown() { + xxx_messageInfo_Capability.DiscardUnknown(m) +} + +var xxx_messageInfo_Capability proto.InternalMessageInfo + +func (m *Capability) GetAddCapabilities() []string { + if m != nil { + return m.AddCapabilities + } + return nil +} + +func (m *Capability) GetDropCapabilities() []string { + if m != nil { + return m.DropCapabilities + } + return nil +} + +func (m *Capability) GetAddAmbientCapabilities() []string { + if m != nil { + return m.AddAmbientCapabilities + } + return nil +} + +// LinuxContainerSecurityContext holds linux security configuration that will be applied to a container. +type LinuxContainerSecurityContext struct { + // Capabilities to add or drop. + Capabilities *Capability `protobuf:"bytes,1,opt,name=capabilities,proto3" json:"capabilities,omitempty"` + // If set, run container in privileged mode. + // Privileged mode is incompatible with the following options. If + // privileged is set, the following features MAY have no effect: + // 1. capabilities + // 2. selinux_options + // 4. seccomp + // 5. apparmor + // + // Privileged mode implies the following specific options are applied: + // 1. All capabilities are added. + // 2. Sensitive paths, such as kernel module paths within sysfs, are not masked. + // 3. Any sysfs and procfs mounts are mounted RW. + // 4. AppArmor confinement is not applied. + // 5. Seccomp restrictions are not applied. + // 6. The device cgroup does not restrict access to any devices. + // 7. All devices from the host's /dev are available within the container. + // 8. SELinux restrictions are not applied (e.g. label=disabled). + Privileged bool `protobuf:"varint,2,opt,name=privileged,proto3" json:"privileged,omitempty"` + // Configurations for the container's namespaces. 
+	// Only used if the container uses namespace for isolation.
+	NamespaceOptions *NamespaceOption `protobuf:"bytes,3,opt,name=namespace_options,json=namespaceOptions,proto3" json:"namespace_options,omitempty"`
+	// SELinux context to be optionally applied.
+	SelinuxOptions *SELinuxOption `protobuf:"bytes,4,opt,name=selinux_options,json=selinuxOptions,proto3" json:"selinux_options,omitempty"`
+	// UID to run the container process as. Only one of run_as_user and
+	// run_as_username can be specified at a time.
+	RunAsUser *Int64Value `protobuf:"bytes,5,opt,name=run_as_user,json=runAsUser,proto3" json:"run_as_user,omitempty"`
+	// GID to run the container process as. run_as_group should only be specified
+	// when run_as_user or run_as_username is specified; otherwise, the runtime
+	// MUST error.
+	RunAsGroup *Int64Value `protobuf:"bytes,12,opt,name=run_as_group,json=runAsGroup,proto3" json:"run_as_group,omitempty"`
+	// User name to run the container process as. If specified, the user MUST
+	// exist in the container image (i.e. in the /etc/passwd inside the image),
+	// and be resolved there by the runtime; otherwise, the runtime MUST error.
+	RunAsUsername string `protobuf:"bytes,6,opt,name=run_as_username,json=runAsUsername,proto3" json:"run_as_username,omitempty"`
+	// If set, the root filesystem of the container is read-only.
+	ReadonlyRootfs bool `protobuf:"varint,7,opt,name=readonly_rootfs,json=readonlyRootfs,proto3" json:"readonly_rootfs,omitempty"`
+	// List of groups applied to the first process run in the container, in
+	// addition to the container's primary GID, and group memberships defined
+	// in the container image for the container's primary UID of the container process.
+	// If the list is empty, no additional groups are added to any container.
+	// Note that group memberships defined in the container image for the container's primary UID
+	// of the container process are still effective, even if they are not included in this list.
+	SupplementalGroups []int64 `protobuf:"varint,8,rep,packed,name=supplemental_groups,json=supplementalGroups,proto3" json:"supplemental_groups,omitempty"`
+	// no_new_privs defines if the flag for no_new_privs should be set on the
+	// container.
+	NoNewPrivs bool `protobuf:"varint,11,opt,name=no_new_privs,json=noNewPrivs,proto3" json:"no_new_privs,omitempty"`
+	// masked_paths is a slice of paths that should be masked by the container
+	// runtime, this can be passed directly to the OCI spec.
+	MaskedPaths []string `protobuf:"bytes,13,rep,name=masked_paths,json=maskedPaths,proto3" json:"masked_paths,omitempty"`
+	// readonly_paths is a slice of paths that should be set as readonly by the
+	// container runtime, this can be passed directly to the OCI spec.
+	ReadonlyPaths []string `protobuf:"bytes,14,rep,name=readonly_paths,json=readonlyPaths,proto3" json:"readonly_paths,omitempty"`
+	// Seccomp profile for the container.
+	Seccomp *SecurityProfile `protobuf:"bytes,15,opt,name=seccomp,proto3" json:"seccomp,omitempty"`
+	// AppArmor profile for the container.
+	Apparmor *SecurityProfile `protobuf:"bytes,16,opt,name=apparmor,proto3" json:"apparmor,omitempty"`
+	// AppArmor profile for the container, candidate values are:
+	// - runtime/default: equivalent to not specifying a profile.
+	// - unconfined: no profiles are loaded
+	// - localhost/<profile_name>: profile loaded on the node
+	//   (localhost) by name. The possible profile names are detailed at
+	//   https://gitlab.com/apparmor/apparmor/-/wikis/AppArmor_Core_Policy_Reference
+	ApparmorProfile string `protobuf:"bytes,9,opt,name=apparmor_profile,json=apparmorProfile,proto3" json:"apparmor_profile,omitempty"` // Deprecated: Do not use.
+	// Seccomp profile for the container, candidate values are:
+	// - runtime/default: the default profile for the container runtime
+	// - unconfined: unconfined profile, ie, no seccomp sandboxing
+	// - localhost/<full_path_to_profile>: the profile installed on the node.
+	//   <full_path_to_profile> is the full path of the profile.
+	//
+	// Default: "", which is identical with unconfined.
+	SeccompProfilePath string `protobuf:"bytes,10,opt,name=seccomp_profile_path,json=seccompProfilePath,proto3" json:"seccomp_profile_path,omitempty"` // Deprecated: Do not use.
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LinuxContainerSecurityContext) Reset() { *m = LinuxContainerSecurityContext{} }
+func (*LinuxContainerSecurityContext) ProtoMessage() {}
+func (*LinuxContainerSecurityContext) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{53}
+}
+func (m *LinuxContainerSecurityContext) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LinuxContainerSecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LinuxContainerSecurityContext.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LinuxContainerSecurityContext) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LinuxContainerSecurityContext.Merge(m, src)
+}
+func (m *LinuxContainerSecurityContext) XXX_Size() int {
+	return m.Size()
+}
+func (m *LinuxContainerSecurityContext) XXX_DiscardUnknown() {
+	xxx_messageInfo_LinuxContainerSecurityContext.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LinuxContainerSecurityContext proto.InternalMessageInfo
+
+func (m *LinuxContainerSecurityContext) GetCapabilities() *Capability {
+	if m != nil {
+		return m.Capabilities
+	}
+	return nil
+}
+
+func (m *LinuxContainerSecurityContext) GetPrivileged() bool {
+	if m != nil {
+		return m.Privileged
+	}
+	return false
+}
+
+func (m *LinuxContainerSecurityContext) GetNamespaceOptions() *NamespaceOption {
+	if m != nil {
+		return m.NamespaceOptions
+	}
+	return nil
+}
+
+func (m *LinuxContainerSecurityContext) GetSelinuxOptions() *SELinuxOption {
+	if m != nil {
+		return m.SelinuxOptions
+	}
+	return nil
+}
+
+func (m *LinuxContainerSecurityContext) GetRunAsUser() *Int64Value {
+	if m != nil {
+		return m.RunAsUser
+	}
+	return nil
+}
+
+func (m *LinuxContainerSecurityContext) GetRunAsGroup() *Int64Value {
+	if m != nil {
+		return m.RunAsGroup
+	}
+	return nil
+}
+
+func (m *LinuxContainerSecurityContext) GetRunAsUsername() string {
+	if m != nil {
+		return m.RunAsUsername
+	}
+	return ""
+}
+
+func (m *LinuxContainerSecurityContext) GetReadonlyRootfs() bool {
+	if m != nil {
+		return m.ReadonlyRootfs
+	}
+	return false
+}
+
+func (m *LinuxContainerSecurityContext) GetSupplementalGroups() []int64 {
+	if m != nil {
+		return m.SupplementalGroups
+	}
+	return nil
+}
+
+func (m *LinuxContainerSecurityContext) GetNoNewPrivs() bool {
+	if m != nil {
+		return m.NoNewPrivs
+	}
+	return false
+}
+
+func (m *LinuxContainerSecurityContext) GetMaskedPaths() []string {
+	if m != nil {
+		return m.MaskedPaths
+	}
+	return nil
+}
+
+func (m *LinuxContainerSecurityContext) 
GetReadonlyPaths() []string { + if m != nil { + return m.ReadonlyPaths + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetSeccomp() *SecurityProfile { + if m != nil { + return m.Seccomp + } + return nil +} + +func (m *LinuxContainerSecurityContext) GetApparmor() *SecurityProfile { + if m != nil { + return m.Apparmor + } + return nil +} + +// Deprecated: Do not use. +func (m *LinuxContainerSecurityContext) GetApparmorProfile() string { + if m != nil { + return m.ApparmorProfile + } + return "" +} + +// Deprecated: Do not use. +func (m *LinuxContainerSecurityContext) GetSeccompProfilePath() string { + if m != nil { + return m.SeccompProfilePath + } + return "" +} + +// LinuxContainerConfig contains platform-specific configuration for +// Linux-based containers. +type LinuxContainerConfig struct { + // Resources specification for the container. + Resources *LinuxContainerResources `protobuf:"bytes,1,opt,name=resources,proto3" json:"resources,omitempty"` + // LinuxContainerSecurityContext configuration for the container. + SecurityContext *LinuxContainerSecurityContext `protobuf:"bytes,2,opt,name=security_context,json=securityContext,proto3" json:"security_context,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LinuxContainerConfig) Reset() { *m = LinuxContainerConfig{} } +func (*LinuxContainerConfig) ProtoMessage() {} +func (*LinuxContainerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{54} +} +func (m *LinuxContainerConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LinuxContainerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LinuxContainerConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LinuxContainerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_LinuxContainerConfig.Merge(m, src) +} +func (m *LinuxContainerConfig) XXX_Size() int { + return m.Size() +} +func (m *LinuxContainerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_LinuxContainerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_LinuxContainerConfig proto.InternalMessageInfo + +func (m *LinuxContainerConfig) GetResources() *LinuxContainerResources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *LinuxContainerConfig) GetSecurityContext() *LinuxContainerSecurityContext { + if m != nil { + return m.SecurityContext + } + return nil +} + +// WindowsNamespaceOption provides options for Windows namespaces. +type WindowsNamespaceOption struct { + // Network namespace for this container/sandbox. 
+ // Namespaces currently set by the kubelet: POD, NODE + Network NamespaceMode `protobuf:"varint,1,opt,name=network,proto3,enum=runtime.v1.NamespaceMode" json:"network,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsNamespaceOption) Reset() { *m = WindowsNamespaceOption{} } +func (*WindowsNamespaceOption) ProtoMessage() {} +func (*WindowsNamespaceOption) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{55} +} +func (m *WindowsNamespaceOption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsNamespaceOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WindowsNamespaceOption.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WindowsNamespaceOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsNamespaceOption.Merge(m, src) +} +func (m *WindowsNamespaceOption) XXX_Size() int { + return m.Size() +} +func (m *WindowsNamespaceOption) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsNamespaceOption.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsNamespaceOption proto.InternalMessageInfo + +func (m *WindowsNamespaceOption) GetNetwork() NamespaceMode { + if m != nil { + return m.Network + } + return NamespaceMode_POD +} + +// WindowsSandboxSecurityContext holds platform-specific configurations that will be +// applied to a sandbox. +// These settings will only apply to the sandbox container. +type WindowsSandboxSecurityContext struct { + // User name to run the container process as. If specified, the user MUST + // exist in the container image and be resolved there by the runtime; + // otherwise, the runtime MUST return error. + RunAsUsername string `protobuf:"bytes,1,opt,name=run_as_username,json=runAsUsername,proto3" json:"run_as_username,omitempty"` + // The contents of the GMSA credential spec to use to run this container. + CredentialSpec string `protobuf:"bytes,2,opt,name=credential_spec,json=credentialSpec,proto3" json:"credential_spec,omitempty"` + // Indicates whether the container requested to run as a HostProcess container. 
+ HostProcess bool `protobuf:"varint,3,opt,name=host_process,json=hostProcess,proto3" json:"host_process,omitempty"` + // Configuration for the sandbox's namespaces + NamespaceOptions *WindowsNamespaceOption `protobuf:"bytes,4,opt,name=namespace_options,json=namespaceOptions,proto3" json:"namespace_options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsSandboxSecurityContext) Reset() { *m = WindowsSandboxSecurityContext{} } +func (*WindowsSandboxSecurityContext) ProtoMessage() {} +func (*WindowsSandboxSecurityContext) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{56} +} +func (m *WindowsSandboxSecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsSandboxSecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WindowsSandboxSecurityContext.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WindowsSandboxSecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsSandboxSecurityContext.Merge(m, src) +} +func (m *WindowsSandboxSecurityContext) XXX_Size() int { + return m.Size() +} +func (m *WindowsSandboxSecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsSandboxSecurityContext.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsSandboxSecurityContext proto.InternalMessageInfo + +func (m *WindowsSandboxSecurityContext) GetRunAsUsername() string { + if m != nil { + return m.RunAsUsername + } + return "" +} + +func (m *WindowsSandboxSecurityContext) GetCredentialSpec() string { + if m != nil { + return m.CredentialSpec + } + return "" +} + +func (m *WindowsSandboxSecurityContext) GetHostProcess() bool { + if m != nil { + return m.HostProcess + } + return false +} + +func (m *WindowsSandboxSecurityContext) GetNamespaceOptions() *WindowsNamespaceOption { + if m != nil { + return m.NamespaceOptions + } + return nil +} + +// WindowsPodSandboxConfig holds platform-specific configurations for Windows +// host platforms and Windows-based containers. +type WindowsPodSandboxConfig struct { + // WindowsSandboxSecurityContext holds sandbox security attributes. 
+ SecurityContext *WindowsSandboxSecurityContext `protobuf:"bytes,1,opt,name=security_context,json=securityContext,proto3" json:"security_context,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsPodSandboxConfig) Reset() { *m = WindowsPodSandboxConfig{} } +func (*WindowsPodSandboxConfig) ProtoMessage() {} +func (*WindowsPodSandboxConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{57} +} +func (m *WindowsPodSandboxConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsPodSandboxConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WindowsPodSandboxConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WindowsPodSandboxConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsPodSandboxConfig.Merge(m, src) +} +func (m *WindowsPodSandboxConfig) XXX_Size() int { + return m.Size() +} +func (m *WindowsPodSandboxConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsPodSandboxConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsPodSandboxConfig proto.InternalMessageInfo + +func (m *WindowsPodSandboxConfig) GetSecurityContext() *WindowsSandboxSecurityContext { + if m != nil { + return m.SecurityContext + } + return nil +} + +// WindowsContainerSecurityContext holds windows security configuration that will be applied to a container. +type WindowsContainerSecurityContext struct { + // User name to run the container process as. If specified, the user MUST + // exist in the container image and be resolved there by the runtime; + // otherwise, the runtime MUST return error. + RunAsUsername string `protobuf:"bytes,1,opt,name=run_as_username,json=runAsUsername,proto3" json:"run_as_username,omitempty"` + // The contents of the GMSA credential spec to use to run this container. + CredentialSpec string `protobuf:"bytes,2,opt,name=credential_spec,json=credentialSpec,proto3" json:"credential_spec,omitempty"` + // Indicates whether a container is to be run as a HostProcess container. 
+ HostProcess bool `protobuf:"varint,3,opt,name=host_process,json=hostProcess,proto3" json:"host_process,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsContainerSecurityContext) Reset() { *m = WindowsContainerSecurityContext{} } +func (*WindowsContainerSecurityContext) ProtoMessage() {} +func (*WindowsContainerSecurityContext) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{58} +} +func (m *WindowsContainerSecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsContainerSecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WindowsContainerSecurityContext.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WindowsContainerSecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsContainerSecurityContext.Merge(m, src) +} +func (m *WindowsContainerSecurityContext) XXX_Size() int { + return m.Size() +} +func (m *WindowsContainerSecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsContainerSecurityContext.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsContainerSecurityContext proto.InternalMessageInfo + +func (m *WindowsContainerSecurityContext) GetRunAsUsername() string { + if m != nil { + return m.RunAsUsername + } + return "" +} + +func (m *WindowsContainerSecurityContext) GetCredentialSpec() string { + if m != nil { + return m.CredentialSpec + } + return "" +} + +func (m *WindowsContainerSecurityContext) GetHostProcess() bool { + if m != nil { + return m.HostProcess + } + return false +} + +// WindowsContainerConfig contains platform-specific configuration for +// Windows-based containers. +type WindowsContainerConfig struct { + // Resources specification for the container. + Resources *WindowsContainerResources `protobuf:"bytes,1,opt,name=resources,proto3" json:"resources,omitempty"` + // WindowsContainerSecurityContext configuration for the container. 
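+
+// [Editorial sketch, not generated code] The container-level analogue of the
+// sandbox context above. In Kubernetes, HostProcess containers must run in a
+// HostProcess pod, so this flag should agree with the sandbox-level one.
+// Values are hypothetical.
+//
+//	winSC := &runtime.WindowsContainerSecurityContext{
+//		RunAsUsername: "ContainerUser",
+//		HostProcess:   false,
+//	}
+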
+ SecurityContext *WindowsContainerSecurityContext `protobuf:"bytes,2,opt,name=security_context,json=securityContext,proto3" json:"security_context,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsContainerConfig) Reset() { *m = WindowsContainerConfig{} } +func (*WindowsContainerConfig) ProtoMessage() {} +func (*WindowsContainerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{59} +} +func (m *WindowsContainerConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsContainerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WindowsContainerConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WindowsContainerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsContainerConfig.Merge(m, src) +} +func (m *WindowsContainerConfig) XXX_Size() int { + return m.Size() +} +func (m *WindowsContainerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsContainerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsContainerConfig proto.InternalMessageInfo + +func (m *WindowsContainerConfig) GetResources() *WindowsContainerResources { + if m != nil { + return m.Resources + } + return nil +} + +func (m *WindowsContainerConfig) GetSecurityContext() *WindowsContainerSecurityContext { + if m != nil { + return m.SecurityContext + } + return nil +} + +// WindowsContainerResources specifies Windows specific configuration for +// resources. +type WindowsContainerResources struct { + // CPU shares (relative weight vs. other containers). Default: 0 (not specified). + CpuShares int64 `protobuf:"varint,1,opt,name=cpu_shares,json=cpuShares,proto3" json:"cpu_shares,omitempty"` + // Number of CPUs available to the container. Default: 0 (not specified). + CpuCount int64 `protobuf:"varint,2,opt,name=cpu_count,json=cpuCount,proto3" json:"cpu_count,omitempty"` + // Specifies the portion of processor cycles that this container can use as a percentage times 100. + CpuMaximum int64 `protobuf:"varint,3,opt,name=cpu_maximum,json=cpuMaximum,proto3" json:"cpu_maximum,omitempty"` + // Memory limit in bytes. Default: 0 (not specified). + MemoryLimitInBytes int64 `protobuf:"varint,4,opt,name=memory_limit_in_bytes,json=memoryLimitInBytes,proto3" json:"memory_limit_in_bytes,omitempty"` + // Specifies the size of the rootfs / scratch space in bytes to be configured for this container. Default: 0 (not specified). 
+ RootfsSizeInBytes int64 `protobuf:"varint,5,opt,name=rootfs_size_in_bytes,json=rootfsSizeInBytes,proto3" json:"rootfs_size_in_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsContainerResources) Reset() { *m = WindowsContainerResources{} } +func (*WindowsContainerResources) ProtoMessage() {} +func (*WindowsContainerResources) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{60} +} +func (m *WindowsContainerResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsContainerResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WindowsContainerResources.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WindowsContainerResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsContainerResources.Merge(m, src) +} +func (m *WindowsContainerResources) XXX_Size() int { + return m.Size() +} +func (m *WindowsContainerResources) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsContainerResources.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsContainerResources proto.InternalMessageInfo + +func (m *WindowsContainerResources) GetCpuShares() int64 { + if m != nil { + return m.CpuShares + } + return 0 +} + +func (m *WindowsContainerResources) GetCpuCount() int64 { + if m != nil { + return m.CpuCount + } + return 0 +} + +func (m *WindowsContainerResources) GetCpuMaximum() int64 { + if m != nil { + return m.CpuMaximum + } + return 0 +} + +func (m *WindowsContainerResources) GetMemoryLimitInBytes() int64 { + if m != nil { + return m.MemoryLimitInBytes + } + return 0 +} + +func (m *WindowsContainerResources) GetRootfsSizeInBytes() int64 { + if m != nil { + return m.RootfsSizeInBytes + } + return 0 +} + +// ContainerMetadata holds all necessary information for building the container +// name. The container runtime is encouraged to expose the metadata in its user +// interface for better user experience. E.g., runtime can construct a unique +// container name based on the metadata. Note that (name, attempt) is unique +// within a sandbox for the entire lifetime of the sandbox. +type ContainerMetadata struct { + // Name of the container. Same as the container name in the PodSpec. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Attempt number of creating the container. Default: 0. 
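+
+// [Editorial note] CpuMaximum in WindowsContainerResources above is a
+// percentage of total host processor cycles multiplied by 100, so its useful
+// range is 1-10000. For example, to cap a container at 2 cores of an 8-core
+// host:
+//
+//	// 2/8 = 25% of total cycles -> 25 * 100 = 2500
+//	res := &runtime.WindowsContainerResources{CpuMaximum: 2500}
+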
+ Attempt uint32 `protobuf:"varint,2,opt,name=attempt,proto3" json:"attempt,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerMetadata) Reset() { *m = ContainerMetadata{} } +func (*ContainerMetadata) ProtoMessage() {} +func (*ContainerMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{61} +} +func (m *ContainerMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerMetadata.Merge(m, src) +} +func (m *ContainerMetadata) XXX_Size() int { + return m.Size() +} +func (m *ContainerMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerMetadata proto.InternalMessageInfo + +func (m *ContainerMetadata) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ContainerMetadata) GetAttempt() uint32 { + if m != nil { + return m.Attempt + } + return 0 +} + +// Device specifies a host device to mount into a container. +type Device struct { + // Path of the device within the container. + ContainerPath string `protobuf:"bytes,1,opt,name=container_path,json=containerPath,proto3" json:"container_path,omitempty"` + // Path of the device on the host. + HostPath string `protobuf:"bytes,2,opt,name=host_path,json=hostPath,proto3" json:"host_path,omitempty"` + // Cgroups permissions of the device, candidates are one or more of + // * r - allows container to read from the specified device. + // * w - allows container to write to the specified device. + // * m - allows container to create device files that do not yet exist. + Permissions string `protobuf:"bytes,3,opt,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Device) Reset() { *m = Device{} } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{62} +} +func (m *Device) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Device.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Device) XXX_Merge(src proto.Message) { + xxx_messageInfo_Device.Merge(m, src) +} +func (m *Device) XXX_Size() int { + return m.Size() +} +func (m *Device) XXX_DiscardUnknown() { + xxx_messageInfo_Device.DiscardUnknown(m) +} + +var xxx_messageInfo_Device proto.InternalMessageInfo + +func (m *Device) GetContainerPath() string { + if m != nil { + return m.ContainerPath + } + return "" +} + +func (m *Device) GetHostPath() string { + if m != nil { + return m.HostPath + } + return "" +} + +func (m *Device) GetPermissions() string { + if m != nil { + return m.Permissions + } + return "" +} + +// CDIDevice specifies a CDI device information. 
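+
+// [Editorial sketch, not generated code] Requesting a host device with full
+// "rwm" cgroup permissions, per the Permissions field documented above. The
+// device path is hypothetical.
+//
+//	dev := &runtime.Device{
+//		ContainerPath: "/dev/fuse",
+//		HostPath:      "/dev/fuse",
+//		Permissions:   "rwm", // read + write + mknod
+//	}
+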
+type CDIDevice struct { + // Fully qualified CDI device name + // for example: vendor.com/gpu=gpudevice1 + // see more details in the CDI specification: + // https://github.com/container-orchestrated-devices/container-device-interface/blob/main/SPEC.md + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CDIDevice) Reset() { *m = CDIDevice{} } +func (*CDIDevice) ProtoMessage() {} +func (*CDIDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{63} +} +func (m *CDIDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CDIDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CDIDevice.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CDIDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_CDIDevice.Merge(m, src) +} +func (m *CDIDevice) XXX_Size() int { + return m.Size() +} +func (m *CDIDevice) XXX_DiscardUnknown() { + xxx_messageInfo_CDIDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_CDIDevice proto.InternalMessageInfo + +func (m *CDIDevice) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// ContainerConfig holds all the required and optional fields for creating a +// container. +type ContainerConfig struct { + // Metadata of the container. This information will uniquely identify the + // container, and the runtime should leverage this to ensure correct + // operation. The runtime may also use this information to improve UX, such + // as by constructing a readable name. + Metadata *ContainerMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Image to use. + Image *ImageSpec `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + // Command to execute (i.e., entrypoint for docker) + Command []string `protobuf:"bytes,3,rep,name=command,proto3" json:"command,omitempty"` + // Args for the Command (i.e., command for docker) + Args []string `protobuf:"bytes,4,rep,name=args,proto3" json:"args,omitempty"` + // Current working directory of the command. + WorkingDir string `protobuf:"bytes,5,opt,name=working_dir,json=workingDir,proto3" json:"working_dir,omitempty"` + // List of environment variable to set in the container. + Envs []*KeyValue `protobuf:"bytes,6,rep,name=envs,proto3" json:"envs,omitempty"` + // Mounts for the container. + Mounts []*Mount `protobuf:"bytes,7,rep,name=mounts,proto3" json:"mounts,omitempty"` + // Devices for the container. + Devices []*Device `protobuf:"bytes,8,rep,name=devices,proto3" json:"devices,omitempty"` + // Key-value pairs that may be used to scope and select individual resources. + // Label keys are of the form: + // + // label-key ::= prefixed-name | name + // prefixed-name ::= prefix '/' name + // prefix ::= DNS_SUBDOMAIN + // name ::= DNS_LABEL + Labels map[string]string `protobuf:"bytes,9,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map that may be used by the kubelet to store and + // retrieve arbitrary metadata. + // + // Annotations MUST NOT be altered by the runtime; the annotations stored + // here MUST be returned in the ContainerStatus associated with the container + // this ContainerConfig creates. 
+ // + // In general, in order to preserve a well-defined interface between the + // kubelet and the container runtime, annotations SHOULD NOT influence + // runtime behaviour. + Annotations map[string]string `protobuf:"bytes,10,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Path relative to PodSandboxConfig.LogDirectory for container to store + // the log (STDOUT and STDERR) on the host. + // E.g., + // + // PodSandboxConfig.LogDirectory = `/var/log/pods/__/` + // ContainerConfig.LogPath = `containerName/Instance#.log` + LogPath string `protobuf:"bytes,11,opt,name=log_path,json=logPath,proto3" json:"log_path,omitempty"` + // Variables for interactive containers, these have very specialized + // use-cases (e.g. debugging). + Stdin bool `protobuf:"varint,12,opt,name=stdin,proto3" json:"stdin,omitempty"` + StdinOnce bool `protobuf:"varint,13,opt,name=stdin_once,json=stdinOnce,proto3" json:"stdin_once,omitempty"` + Tty bool `protobuf:"varint,14,opt,name=tty,proto3" json:"tty,omitempty"` + // Configuration specific to Linux containers. + Linux *LinuxContainerConfig `protobuf:"bytes,15,opt,name=linux,proto3" json:"linux,omitempty"` + // Configuration specific to Windows containers. + Windows *WindowsContainerConfig `protobuf:"bytes,16,opt,name=windows,proto3" json:"windows,omitempty"` + // CDI devices for the container. + CDIDevices []*CDIDevice `protobuf:"bytes,17,rep,name=CDI_devices,json=CDIDevices,proto3" json:"CDI_devices,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerConfig) Reset() { *m = ContainerConfig{} } +func (*ContainerConfig) ProtoMessage() {} +func (*ContainerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{64} +} +func (m *ContainerConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerConfig.Merge(m, src) +} +func (m *ContainerConfig) XXX_Size() int { + return m.Size() +} +func (m *ContainerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerConfig proto.InternalMessageInfo + +func (m *ContainerConfig) GetMetadata() *ContainerMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ContainerConfig) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +func (m *ContainerConfig) GetCommand() []string { + if m != nil { + return m.Command + } + return nil +} + +func (m *ContainerConfig) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *ContainerConfig) GetWorkingDir() string { + if m != nil { + return m.WorkingDir + } + return "" +} + +func (m *ContainerConfig) GetEnvs() []*KeyValue { + if m != nil { + return m.Envs + } + return nil +} + +func (m *ContainerConfig) GetMounts() []*Mount { + if m != nil { + return m.Mounts + } + return nil +} + +func (m *ContainerConfig) GetDevices() []*Device { + if m != nil { + return m.Devices + } + return nil +} + +func (m *ContainerConfig) GetLabels() map[string]string { + if m != nil { + return 
m.Labels + } + return nil +} + +func (m *ContainerConfig) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *ContainerConfig) GetLogPath() string { + if m != nil { + return m.LogPath + } + return "" +} + +func (m *ContainerConfig) GetStdin() bool { + if m != nil { + return m.Stdin + } + return false +} + +func (m *ContainerConfig) GetStdinOnce() bool { + if m != nil { + return m.StdinOnce + } + return false +} + +func (m *ContainerConfig) GetTty() bool { + if m != nil { + return m.Tty + } + return false +} + +func (m *ContainerConfig) GetLinux() *LinuxContainerConfig { + if m != nil { + return m.Linux + } + return nil +} + +func (m *ContainerConfig) GetWindows() *WindowsContainerConfig { + if m != nil { + return m.Windows + } + return nil +} + +func (m *ContainerConfig) GetCDIDevices() []*CDIDevice { + if m != nil { + return m.CDIDevices + } + return nil +} + +type CreateContainerRequest struct { + // ID of the PodSandbox in which the container should be created. + PodSandboxId string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + // Config of the container. + Config *ContainerConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + // Config of the PodSandbox. This is the same config that was passed + // to RunPodSandboxRequest to create the PodSandbox. It is passed again + // here just for easy reference. The PodSandboxConfig is immutable and + // remains the same throughout the lifetime of the pod. + SandboxConfig *PodSandboxConfig `protobuf:"bytes,3,opt,name=sandbox_config,json=sandboxConfig,proto3" json:"sandbox_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } +func (*CreateContainerRequest) ProtoMessage() {} +func (*CreateContainerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{65} +} +func (m *CreateContainerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CreateContainerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CreateContainerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateContainerRequest.Merge(m, src) +} +func (m *CreateContainerRequest) XXX_Size() int { + return m.Size() +} +func (m *CreateContainerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateContainerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateContainerRequest proto.InternalMessageInfo + +func (m *CreateContainerRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *CreateContainerRequest) GetConfig() *ContainerConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *CreateContainerRequest) GetSandboxConfig() *PodSandboxConfig { + if m != nil { + return m.SandboxConfig + } + return nil +} + +type CreateContainerResponse struct { + // ID of the created container. 
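+
+// [Editorial sketch, not generated code] Putting ContainerConfig and
+// CreateContainerRequest together. `client` is a hypothetical
+// RuntimeServiceClient; sandboxID and sandboxCfg would come from an earlier
+// RunPodSandbox call, and the image and command are made up.
+//
+//	cfg := &runtime.ContainerConfig{
+//		Metadata: &runtime.ContainerMetadata{Name: "web", Attempt: 0},
+//		Image:    &runtime.ImageSpec{Image: "docker.io/library/nginx:1.25"},
+//		Command:  []string{"nginx", "-g", "daemon off;"},
+//		LogPath:  "web/0.log", // relative to PodSandboxConfig.LogDirectory
+//	}
+//	resp, err := client.CreateContainer(ctx, &runtime.CreateContainerRequest{
+//		PodSandboxId:  sandboxID,
+//		Config:        cfg,
+//		SandboxConfig: sandboxCfg, // same config passed to RunPodSandbox
+//	})
+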
+ ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } +func (*CreateContainerResponse) ProtoMessage() {} +func (*CreateContainerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{66} +} +func (m *CreateContainerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CreateContainerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CreateContainerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateContainerResponse.Merge(m, src) +} +func (m *CreateContainerResponse) XXX_Size() int { + return m.Size() +} +func (m *CreateContainerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateContainerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateContainerResponse proto.InternalMessageInfo + +func (m *CreateContainerResponse) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +type StartContainerRequest struct { + // ID of the container to start. + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartContainerRequest) Reset() { *m = StartContainerRequest{} } +func (*StartContainerRequest) ProtoMessage() {} +func (*StartContainerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{67} +} +func (m *StartContainerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StartContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StartContainerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StartContainerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartContainerRequest.Merge(m, src) +} +func (m *StartContainerRequest) XXX_Size() int { + return m.Size() +} +func (m *StartContainerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartContainerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartContainerRequest proto.InternalMessageInfo + +func (m *StartContainerRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +type StartContainerResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartContainerResponse) Reset() { *m = StartContainerResponse{} } +func (*StartContainerResponse) ProtoMessage() {} +func (*StartContainerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{68} +} +func (m *StartContainerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StartContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StartContainerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return 
nil, err + } + return b[:n], nil + } +} +func (m *StartContainerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartContainerResponse.Merge(m, src) +} +func (m *StartContainerResponse) XXX_Size() int { + return m.Size() +} +func (m *StartContainerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartContainerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartContainerResponse proto.InternalMessageInfo + +type StopContainerRequest struct { + // ID of the container to stop. + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Timeout in seconds to wait for the container to stop before forcibly + // terminating it. Default: 0 (forcibly terminate the container immediately) + Timeout int64 `protobuf:"varint,2,opt,name=timeout,proto3" json:"timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopContainerRequest) Reset() { *m = StopContainerRequest{} } +func (*StopContainerRequest) ProtoMessage() {} +func (*StopContainerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{69} +} +func (m *StopContainerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StopContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StopContainerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StopContainerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopContainerRequest.Merge(m, src) +} +func (m *StopContainerRequest) XXX_Size() int { + return m.Size() +} +func (m *StopContainerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopContainerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopContainerRequest proto.InternalMessageInfo + +func (m *StopContainerRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *StopContainerRequest) GetTimeout() int64 { + if m != nil { + return m.Timeout + } + return 0 +} + +type StopContainerResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopContainerResponse) Reset() { *m = StopContainerResponse{} } +func (*StopContainerResponse) ProtoMessage() {} +func (*StopContainerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{70} +} +func (m *StopContainerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StopContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StopContainerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StopContainerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopContainerResponse.Merge(m, src) +} +func (m *StopContainerResponse) XXX_Size() int { + return m.Size() +} +func (m *StopContainerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopContainerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopContainerResponse proto.InternalMessageInfo + +type RemoveContainerRequest struct { + // ID of the container to remove. 
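+
+// [Editorial sketch, not generated code] Graceful stop followed by removal,
+// using the messages above. Timeout is in seconds; 0 terminates the container
+// immediately. `client` and `resp` are the hypothetical variables from the
+// create sketch earlier.
+//
+//	_, err = client.StopContainer(ctx, &runtime.StopContainerRequest{
+//		ContainerId: resp.ContainerId,
+//		Timeout:     30, // give the container 30s before forcible termination
+//	})
+//	_, err = client.RemoveContainer(ctx, &runtime.RemoveContainerRequest{
+//		ContainerId: resp.ContainerId,
+//	})
+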
+ ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveContainerRequest) Reset() { *m = RemoveContainerRequest{} } +func (*RemoveContainerRequest) ProtoMessage() {} +func (*RemoveContainerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{71} +} +func (m *RemoveContainerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveContainerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveContainerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveContainerRequest.Merge(m, src) +} +func (m *RemoveContainerRequest) XXX_Size() int { + return m.Size() +} +func (m *RemoveContainerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveContainerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveContainerRequest proto.InternalMessageInfo + +func (m *RemoveContainerRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +type RemoveContainerResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveContainerResponse) Reset() { *m = RemoveContainerResponse{} } +func (*RemoveContainerResponse) ProtoMessage() {} +func (*RemoveContainerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{72} +} +func (m *RemoveContainerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveContainerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveContainerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveContainerResponse.Merge(m, src) +} +func (m *RemoveContainerResponse) XXX_Size() int { + return m.Size() +} +func (m *RemoveContainerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveContainerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveContainerResponse proto.InternalMessageInfo + +// ContainerStateValue is the wrapper of ContainerState. +type ContainerStateValue struct { + // State of the container. 
+ State ContainerState `protobuf:"varint,1,opt,name=state,proto3,enum=runtime.v1.ContainerState" json:"state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerStateValue) Reset() { *m = ContainerStateValue{} } +func (*ContainerStateValue) ProtoMessage() {} +func (*ContainerStateValue) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{73} +} +func (m *ContainerStateValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerStateValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerStateValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateValue.Merge(m, src) +} +func (m *ContainerStateValue) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateValue) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStateValue proto.InternalMessageInfo + +func (m *ContainerStateValue) GetState() ContainerState { + if m != nil { + return m.State + } + return ContainerState_CONTAINER_CREATED +} + +// ContainerFilter is used to filter containers. +// All those fields are combined with 'AND' +type ContainerFilter struct { + // ID of the container. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // State of the container. + State *ContainerStateValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + // ID of the PodSandbox. + PodSandboxId string `protobuf:"bytes,3,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + // LabelSelector to select matches. + // Only api.MatchLabels is supported for now and the requirements + // are ANDed. MatchExpressions is not supported yet. 
+ LabelSelector map[string]string `protobuf:"bytes,4,rep,name=label_selector,json=labelSelector,proto3" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerFilter) Reset() { *m = ContainerFilter{} } +func (*ContainerFilter) ProtoMessage() {} +func (*ContainerFilter) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{74} +} +func (m *ContainerFilter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerFilter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerFilter) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerFilter.Merge(m, src) +} +func (m *ContainerFilter) XXX_Size() int { + return m.Size() +} +func (m *ContainerFilter) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerFilter.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerFilter proto.InternalMessageInfo + +func (m *ContainerFilter) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ContainerFilter) GetState() *ContainerStateValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ContainerFilter) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *ContainerFilter) GetLabelSelector() map[string]string { + if m != nil { + return m.LabelSelector + } + return nil +} + +type ListContainersRequest struct { + Filter *ContainerFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListContainersRequest) Reset() { *m = ListContainersRequest{} } +func (*ListContainersRequest) ProtoMessage() {} +func (*ListContainersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{75} +} +func (m *ListContainersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListContainersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListContainersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListContainersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListContainersRequest.Merge(m, src) +} +func (m *ListContainersRequest) XXX_Size() int { + return m.Size() +} +func (m *ListContainersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListContainersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListContainersRequest proto.InternalMessageInfo + +func (m *ListContainersRequest) GetFilter() *ContainerFilter { + if m != nil { + return m.Filter + } + return nil +} + +// Container provides the runtime information for a container, such as ID, hash, +// state of the container. +type Container struct { + // ID of the container, used by the container runtime to identify + // a container. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the sandbox to which this container belongs. 
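+
+// [Editorial sketch, not generated code] ContainerFilter fields are ANDed, so
+// the query below matches only running containers in one sandbox that also
+// carry the (hypothetical) app=web label.
+//
+//	list, err := client.ListContainers(ctx, &runtime.ListContainersRequest{
+//		Filter: &runtime.ContainerFilter{
+//			PodSandboxId:  sandboxID,
+//			State:         &runtime.ContainerStateValue{State: runtime.ContainerState_CONTAINER_RUNNING},
+//			LabelSelector: map[string]string{"app": "web"},
+//		},
+//	})
+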
+ PodSandboxId string `protobuf:"bytes,2,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + // Metadata of the container. + Metadata *ContainerMetadata `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Spec of the image. + Image *ImageSpec `protobuf:"bytes,4,opt,name=image,proto3" json:"image,omitempty"` + // Reference to the image in use. For most runtimes, this should be an + // image ID. + ImageRef string `protobuf:"bytes,5,opt,name=image_ref,json=imageRef,proto3" json:"image_ref,omitempty"` + // State of the container. + State ContainerState `protobuf:"varint,6,opt,name=state,proto3,enum=runtime.v1.ContainerState" json:"state,omitempty"` + // Creation time of the container in nanoseconds. + CreatedAt int64 `protobuf:"varint,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Key-value pairs that may be used to scope and select individual resources. + Labels map[string]string `protobuf:"bytes,8,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding ContainerConfig used to + // instantiate this Container. + Annotations map[string]string `protobuf:"bytes,9,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Container) Reset() { *m = Container{} } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{76} +} +func (m *Container) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Container.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Container) XXX_Merge(src proto.Message) { + xxx_messageInfo_Container.Merge(m, src) +} +func (m *Container) XXX_Size() int { + return m.Size() +} +func (m *Container) XXX_DiscardUnknown() { + xxx_messageInfo_Container.DiscardUnknown(m) +} + +var xxx_messageInfo_Container proto.InternalMessageInfo + +func (m *Container) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Container) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *Container) GetMetadata() *ContainerMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *Container) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +func (m *Container) GetImageRef() string { + if m != nil { + return m.ImageRef + } + return "" +} + +func (m *Container) GetState() ContainerState { + if m != nil { + return m.State + } + return ContainerState_CONTAINER_CREATED +} + +func (m *Container) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *Container) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Container) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +type ListContainersResponse struct { 
+ // List of containers. + Containers []*Container `protobuf:"bytes,1,rep,name=containers,proto3" json:"containers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListContainersResponse) Reset() { *m = ListContainersResponse{} } +func (*ListContainersResponse) ProtoMessage() {} +func (*ListContainersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{77} +} +func (m *ListContainersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListContainersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListContainersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListContainersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListContainersResponse.Merge(m, src) +} +func (m *ListContainersResponse) XXX_Size() int { + return m.Size() +} +func (m *ListContainersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListContainersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListContainersResponse proto.InternalMessageInfo + +func (m *ListContainersResponse) GetContainers() []*Container { + if m != nil { + return m.Containers + } + return nil +} + +type ContainerStatusRequest struct { + // ID of the container for which to retrieve status. + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Verbose indicates whether to return extra information about the container. + Verbose bool `protobuf:"varint,2,opt,name=verbose,proto3" json:"verbose,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerStatusRequest) Reset() { *m = ContainerStatusRequest{} } +func (*ContainerStatusRequest) ProtoMessage() {} +func (*ContainerStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{78} +} +func (m *ContainerStatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStatusRequest.Merge(m, src) +} +func (m *ContainerStatusRequest) XXX_Size() int { + return m.Size() +} +func (m *ContainerStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStatusRequest proto.InternalMessageInfo + +func (m *ContainerStatusRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *ContainerStatusRequest) GetVerbose() bool { + if m != nil { + return m.Verbose + } + return false +} + +// ContainerStatus represents the status of a container. +type ContainerStatus struct { + // ID of the container. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Metadata of the container. + Metadata *ContainerMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Status of the container. 
+ State ContainerState `protobuf:"varint,3,opt,name=state,proto3,enum=runtime.v1.ContainerState" json:"state,omitempty"` + // Creation time of the container in nanoseconds. + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Start time of the container in nanoseconds. Default: 0 (not specified). + StartedAt int64 `protobuf:"varint,5,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + // Finish time of the container in nanoseconds. Default: 0 (not specified). + FinishedAt int64 `protobuf:"varint,6,opt,name=finished_at,json=finishedAt,proto3" json:"finished_at,omitempty"` + // Exit code of the container. Only required when finished_at != 0. Default: 0. + ExitCode int32 `protobuf:"varint,7,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` + // Spec of the image. + Image *ImageSpec `protobuf:"bytes,8,opt,name=image,proto3" json:"image,omitempty"` + // Reference to the image in use. For most runtimes, this should be an + // image ID + ImageRef string `protobuf:"bytes,9,opt,name=image_ref,json=imageRef,proto3" json:"image_ref,omitempty"` + // Brief CamelCase string explaining why container is in its current state. + // Must be set to "OOMKilled" for containers terminated by cgroup-based Out-of-Memory killer. + Reason string `protobuf:"bytes,10,opt,name=reason,proto3" json:"reason,omitempty"` + // Human-readable message indicating details about why container is in its + // current state. + Message string `protobuf:"bytes,11,opt,name=message,proto3" json:"message,omitempty"` + // Key-value pairs that may be used to scope and select individual resources. + Labels map[string]string `protobuf:"bytes,12,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding ContainerConfig used to + // instantiate the Container this status represents. + Annotations map[string]string `protobuf:"bytes,13,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Mounts for the container. + Mounts []*Mount `protobuf:"bytes,14,rep,name=mounts,proto3" json:"mounts,omitempty"` + // Log path of container. + LogPath string `protobuf:"bytes,15,opt,name=log_path,json=logPath,proto3" json:"log_path,omitempty"` + // Resource limits configuration of the container. 
+ Resources *ContainerResources `protobuf:"bytes,16,opt,name=resources,proto3" json:"resources,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{79} +} +func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStatus.Merge(m, src) +} +func (m *ContainerStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo + +func (m *ContainerStatus) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ContainerStatus) GetMetadata() *ContainerMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ContainerStatus) GetState() ContainerState { + if m != nil { + return m.State + } + return ContainerState_CONTAINER_CREATED +} + +func (m *ContainerStatus) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *ContainerStatus) GetStartedAt() int64 { + if m != nil { + return m.StartedAt + } + return 0 +} + +func (m *ContainerStatus) GetFinishedAt() int64 { + if m != nil { + return m.FinishedAt + } + return 0 +} + +func (m *ContainerStatus) GetExitCode() int32 { + if m != nil { + return m.ExitCode + } + return 0 +} + +func (m *ContainerStatus) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +func (m *ContainerStatus) GetImageRef() string { + if m != nil { + return m.ImageRef + } + return "" +} + +func (m *ContainerStatus) GetReason() string { + if m != nil { + return m.Reason + } + return "" +} + +func (m *ContainerStatus) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *ContainerStatus) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ContainerStatus) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +func (m *ContainerStatus) GetMounts() []*Mount { + if m != nil { + return m.Mounts + } + return nil +} + +func (m *ContainerStatus) GetLogPath() string { + if m != nil { + return m.LogPath + } + return "" +} + +func (m *ContainerStatus) GetResources() *ContainerResources { + if m != nil { + return m.Resources + } + return nil +} + +type ContainerStatusResponse struct { + // Status of the container. + Status *ContainerStatus `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // Info is extra information of the Container. The key could be arbitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. pid for linux container based container runtime. + // It should only be returned non-empty when Verbose is true. 
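+
+// [Editorial note] When reading a ContainerStatus, exit_code is only
+// meaningful once finished_at is non-zero, and reason is "OOMKilled" for
+// cgroup OOM terminations, as documented above. A sketch, with `st` a
+// hypothetical *runtime.ContainerStatus:
+//
+//	if st.GetFinishedAt() != 0 {
+//		if st.GetReason() == "OOMKilled" {
+//			// terminated by the out-of-memory killer
+//		}
+//		log.Printf("exited with code %d", st.GetExitCode())
+//	}
+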
+ Info map[string]string `protobuf:"bytes,2,rep,name=info,proto3" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerStatusResponse) Reset() { *m = ContainerStatusResponse{} } +func (*ContainerStatusResponse) ProtoMessage() {} +func (*ContainerStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{80} +} +func (m *ContainerStatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStatusResponse.Merge(m, src) +} +func (m *ContainerStatusResponse) XXX_Size() int { + return m.Size() +} +func (m *ContainerStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStatusResponse proto.InternalMessageInfo + +func (m *ContainerStatusResponse) GetStatus() *ContainerStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *ContainerStatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + +// ContainerResources holds resource limits configuration for a container. +type ContainerResources struct { + // Resource limits configuration specific to Linux container. + Linux *LinuxContainerResources `protobuf:"bytes,1,opt,name=linux,proto3" json:"linux,omitempty"` + // Resource limits configuration specific to Windows container. + Windows *WindowsContainerResources `protobuf:"bytes,2,opt,name=windows,proto3" json:"windows,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerResources) Reset() { *m = ContainerResources{} } +func (*ContainerResources) ProtoMessage() {} +func (*ContainerResources) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{81} +} +func (m *ContainerResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerResources.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerResources.Merge(m, src) +} +func (m *ContainerResources) XXX_Size() int { + return m.Size() +} +func (m *ContainerResources) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerResources.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerResources proto.InternalMessageInfo + +func (m *ContainerResources) GetLinux() *LinuxContainerResources { + if m != nil { + return m.Linux + } + return nil +} + +func (m *ContainerResources) GetWindows() *WindowsContainerResources { + if m != nil { + return m.Windows + } + return nil +} + +type UpdateContainerResourcesRequest struct { + // ID of the container to update. 
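+
+// [Editorial sketch, not generated code] In-place resource resize via
+// UpdateContainerResources; only the platform-specific block that applies to
+// the container needs to be set. The 256MiB memory limit is hypothetical.
+//
+//	_, err = client.UpdateContainerResources(ctx, &runtime.UpdateContainerResourcesRequest{
+//		ContainerId: resp.ContainerId,
+//		Linux:       &runtime.LinuxContainerResources{MemoryLimitInBytes: 256 << 20},
+//	})
+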
+ ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Resource configuration specific to Linux containers. + Linux *LinuxContainerResources `protobuf:"bytes,2,opt,name=linux,proto3" json:"linux,omitempty"` + // Resource configuration specific to Windows containers. + Windows *WindowsContainerResources `protobuf:"bytes,3,opt,name=windows,proto3" json:"windows,omitempty"` + // Unstructured key-value map holding arbitrary additional information for + // container resources updating. This can be used for specifying experimental + // resources to update or other options to use when updating the container. + Annotations map[string]string `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateContainerResourcesRequest) Reset() { *m = UpdateContainerResourcesRequest{} } +func (*UpdateContainerResourcesRequest) ProtoMessage() {} +func (*UpdateContainerResourcesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{82} +} +func (m *UpdateContainerResourcesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateContainerResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateContainerResourcesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateContainerResourcesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateContainerResourcesRequest.Merge(m, src) +} +func (m *UpdateContainerResourcesRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateContainerResourcesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateContainerResourcesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateContainerResourcesRequest proto.InternalMessageInfo + +func (m *UpdateContainerResourcesRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *UpdateContainerResourcesRequest) GetLinux() *LinuxContainerResources { + if m != nil { + return m.Linux + } + return nil +} + +func (m *UpdateContainerResourcesRequest) GetWindows() *WindowsContainerResources { + if m != nil { + return m.Windows + } + return nil +} + +func (m *UpdateContainerResourcesRequest) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +type UpdateContainerResourcesResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateContainerResourcesResponse) Reset() { *m = UpdateContainerResourcesResponse{} } +func (*UpdateContainerResourcesResponse) ProtoMessage() {} +func (*UpdateContainerResourcesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{83} +} +func (m *UpdateContainerResourcesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateContainerResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateContainerResourcesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*UpdateContainerResourcesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateContainerResourcesResponse.Merge(m, src) +} +func (m *UpdateContainerResourcesResponse) XXX_Size() int { + return m.Size() +} +func (m *UpdateContainerResourcesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateContainerResourcesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateContainerResourcesResponse proto.InternalMessageInfo + +type ExecSyncRequest struct { + // ID of the container. + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Command to execute. + Cmd []string `protobuf:"bytes,2,rep,name=cmd,proto3" json:"cmd,omitempty"` + // Timeout in seconds to stop the command. Default: 0 (run forever). + Timeout int64 `protobuf:"varint,3,opt,name=timeout,proto3" json:"timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecSyncRequest) Reset() { *m = ExecSyncRequest{} } +func (*ExecSyncRequest) ProtoMessage() {} +func (*ExecSyncRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{84} +} +func (m *ExecSyncRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecSyncRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExecSyncRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExecSyncRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecSyncRequest.Merge(m, src) +} +func (m *ExecSyncRequest) XXX_Size() int { + return m.Size() +} +func (m *ExecSyncRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecSyncRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecSyncRequest proto.InternalMessageInfo + +func (m *ExecSyncRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *ExecSyncRequest) GetCmd() []string { + if m != nil { + return m.Cmd + } + return nil +} + +func (m *ExecSyncRequest) GetTimeout() int64 { + if m != nil { + return m.Timeout + } + return 0 +} + +type ExecSyncResponse struct { + // Captured command stdout output. + // The runtime should cap the output of this response to 16MB. + // If the stdout of the command produces more than 16MB, the remaining output + // should be discarded, and the command should proceed with no error. + // See CVE-2022-1708 and CVE-2022-31030 for more information. + Stdout []byte `protobuf:"bytes,1,opt,name=stdout,proto3" json:"stdout,omitempty"` + // Captured command stderr output. + // The runtime should cap the output of this response to 16MB. + // If the stderr of the command produces more than 16MB, the remaining output + // should be discarded, and the command should proceed with no error. + // See CVE-2022-1708 and CVE-2022-31030 for more information. + Stderr []byte `protobuf:"bytes,2,opt,name=stderr,proto3" json:"stderr,omitempty"` + // Exit code the command finished with. Default: 0 (success). 
+ ExitCode int32 `protobuf:"varint,3,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecSyncResponse) Reset() { *m = ExecSyncResponse{} } +func (*ExecSyncResponse) ProtoMessage() {} +func (*ExecSyncResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{85} +} +func (m *ExecSyncResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecSyncResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExecSyncResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExecSyncResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecSyncResponse.Merge(m, src) +} +func (m *ExecSyncResponse) XXX_Size() int { + return m.Size() +} +func (m *ExecSyncResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecSyncResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecSyncResponse proto.InternalMessageInfo + +func (m *ExecSyncResponse) GetStdout() []byte { + if m != nil { + return m.Stdout + } + return nil +} + +func (m *ExecSyncResponse) GetStderr() []byte { + if m != nil { + return m.Stderr + } + return nil +} + +func (m *ExecSyncResponse) GetExitCode() int32 { + if m != nil { + return m.ExitCode + } + return 0 +} + +type ExecRequest struct { + // ID of the container in which to execute the command. + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Command to execute. + Cmd []string `protobuf:"bytes,2,rep,name=cmd,proto3" json:"cmd,omitempty"` + // Whether to exec the command in a TTY. + Tty bool `protobuf:"varint,3,opt,name=tty,proto3" json:"tty,omitempty"` + // Whether to stream stdin. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + Stdin bool `protobuf:"varint,4,opt,name=stdin,proto3" json:"stdin,omitempty"` + // Whether to stream stdout. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + Stdout bool `protobuf:"varint,5,opt,name=stdout,proto3" json:"stdout,omitempty"` + // Whether to stream stderr. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported + // in this case. The output of stdout and stderr will be combined to a + // single stream. 
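// The 16MB stdout/stderr cap documented on ExecSyncResponse above (see
// CVE-2022-1708 and CVE-2022-31030) can be met with a truncating writer:
// past the limit, output is silently discarded while the command keeps
// running, and Write keeps reporting success so no error is surfaced. A
// hypothetical sketch assuming the standard bytes package, not the mechanism
// any particular runtime uses:
type capWriter struct {
    buf   bytes.Buffer // captured output, at most limit bytes
    limit int          // e.g. 16 << 20 for the 16MB cap
}

func (w *capWriter) Write(p []byte) (int, error) {
    n := len(p)
    if room := w.limit - w.buf.Len(); room > 0 {
        if len(p) > room {
            p = p[:room] // keep the head, drop the rest
        }
        w.buf.Write(p)
    }
    return n, nil // always succeed so the command proceeds with no error
}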
+ Stderr bool `protobuf:"varint,6,opt,name=stderr,proto3" json:"stderr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecRequest) Reset() { *m = ExecRequest{} } +func (*ExecRequest) ProtoMessage() {} +func (*ExecRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{86} +} +func (m *ExecRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExecRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExecRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecRequest.Merge(m, src) +} +func (m *ExecRequest) XXX_Size() int { + return m.Size() +} +func (m *ExecRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExecRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecRequest proto.InternalMessageInfo + +func (m *ExecRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *ExecRequest) GetCmd() []string { + if m != nil { + return m.Cmd + } + return nil +} + +func (m *ExecRequest) GetTty() bool { + if m != nil { + return m.Tty + } + return false +} + +func (m *ExecRequest) GetStdin() bool { + if m != nil { + return m.Stdin + } + return false +} + +func (m *ExecRequest) GetStdout() bool { + if m != nil { + return m.Stdout + } + return false +} + +func (m *ExecRequest) GetStderr() bool { + if m != nil { + return m.Stderr + } + return false +} + +type ExecResponse struct { + // Fully qualified URL of the exec streaming server. + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExecResponse) Reset() { *m = ExecResponse{} } +func (*ExecResponse) ProtoMessage() {} +func (*ExecResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{87} +} +func (m *ExecResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExecResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExecResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecResponse.Merge(m, src) +} +func (m *ExecResponse) XXX_Size() int { + return m.Size() +} +func (m *ExecResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExecResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecResponse proto.InternalMessageInfo + +func (m *ExecResponse) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +type AttachRequest struct { + // ID of the container to which to attach. + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Whether to stream stdin. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + Stdin bool `protobuf:"varint,2,opt,name=stdin,proto3" json:"stdin,omitempty"` + // Whether the process being attached is running in a TTY. + // This must match the TTY setting in the ContainerConfig. + Tty bool `protobuf:"varint,3,opt,name=tty,proto3" json:"tty,omitempty"` + // Whether to stream stdout. 
+ // One of `stdin`, `stdout`, and `stderr` MUST be true. + Stdout bool `protobuf:"varint,4,opt,name=stdout,proto3" json:"stdout,omitempty"` + // Whether to stream stderr. + // One of `stdin`, `stdout`, and `stderr` MUST be true. + // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported + // in this case. The output of stdout and stderr will be combined to a + // single stream. + Stderr bool `protobuf:"varint,5,opt,name=stderr,proto3" json:"stderr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttachRequest) Reset() { *m = AttachRequest{} } +func (*AttachRequest) ProtoMessage() {} +func (*AttachRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{88} +} +func (m *AttachRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttachRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AttachRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AttachRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachRequest.Merge(m, src) +} +func (m *AttachRequest) XXX_Size() int { + return m.Size() +} +func (m *AttachRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AttachRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AttachRequest proto.InternalMessageInfo + +func (m *AttachRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *AttachRequest) GetStdin() bool { + if m != nil { + return m.Stdin + } + return false +} + +func (m *AttachRequest) GetTty() bool { + if m != nil { + return m.Tty + } + return false +} + +func (m *AttachRequest) GetStdout() bool { + if m != nil { + return m.Stdout + } + return false +} + +func (m *AttachRequest) GetStderr() bool { + if m != nil { + return m.Stderr + } + return false +} + +type AttachResponse struct { + // Fully qualified URL of the attach streaming server. + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttachResponse) Reset() { *m = AttachResponse{} } +func (*AttachResponse) ProtoMessage() {} +func (*AttachResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{89} +} +func (m *AttachResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttachResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AttachResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AttachResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachResponse.Merge(m, src) +} +func (m *AttachResponse) XXX_Size() int { + return m.Size() +} +func (m *AttachResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AttachResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AttachResponse proto.InternalMessageInfo + +func (m *AttachResponse) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +type PortForwardRequest struct { + // ID of the container to which to forward the port. + PodSandboxId string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + // Port to forward. 
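// ExecRequest and AttachRequest above share one stream-selection rule: at
// least one of stdin, stdout and stderr must be requested, and a TTY cannot
// be combined with a separate stderr stream, since output is multiplexed onto
// stdout in that case. A hypothetical client-side guard (validateStreams is
// not part of the CRI API), assuming the standard errors package:
func validateStreams(stdin, stdout, stderr, tty bool) error {
    if !stdin && !stdout && !stderr {
        return errors.New("at least one of stdin, stdout and stderr must be true")
    }
    if tty && stderr {
        return errors.New("stderr must be false when tty is true")
    }
    return nil
}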
+ Port []int32 `protobuf:"varint,2,rep,packed,name=port,proto3" json:"port,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PortForwardRequest) Reset() { *m = PortForwardRequest{} } +func (*PortForwardRequest) ProtoMessage() {} +func (*PortForwardRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{90} +} +func (m *PortForwardRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortForwardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PortForwardRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PortForwardRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortForwardRequest.Merge(m, src) +} +func (m *PortForwardRequest) XXX_Size() int { + return m.Size() +} +func (m *PortForwardRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PortForwardRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PortForwardRequest proto.InternalMessageInfo + +func (m *PortForwardRequest) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *PortForwardRequest) GetPort() []int32 { + if m != nil { + return m.Port + } + return nil +} + +type PortForwardResponse struct { + // Fully qualified URL of the port-forward streaming server. + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PortForwardResponse) Reset() { *m = PortForwardResponse{} } +func (*PortForwardResponse) ProtoMessage() {} +func (*PortForwardResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{91} +} +func (m *PortForwardResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortForwardResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PortForwardResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PortForwardResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortForwardResponse.Merge(m, src) +} +func (m *PortForwardResponse) XXX_Size() int { + return m.Size() +} +func (m *PortForwardResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PortForwardResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PortForwardResponse proto.InternalMessageInfo + +func (m *PortForwardResponse) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +type ImageFilter struct { + // Spec of the image. 
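// Exec, Attach and PortForward do not move any payload bytes over this RPC;
// per the *Response messages above, the runtime replies with the URL of its
// streaming server and the caller (kubelet, for example) then upgrades a
// separate connection to that URL. A sketch assuming the RuntimeServiceClient
// interface generated later in this file and the standard context package:
func portForwardURL(ctx context.Context, client RuntimeServiceClient, sandboxID string, port int32) (string, error) {
    resp, err := client.PortForward(ctx, &PortForwardRequest{
        PodSandboxId: sandboxID,
        Port:         []int32{port},
    })
    if err != nil {
        return "", err
    }
    return resp.GetUrl(), nil // the streaming connection is made to this URL
}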
+ Image *ImageSpec `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImageFilter) Reset() { *m = ImageFilter{} } +func (*ImageFilter) ProtoMessage() {} +func (*ImageFilter) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{92} +} +func (m *ImageFilter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ImageFilter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ImageFilter) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageFilter.Merge(m, src) +} +func (m *ImageFilter) XXX_Size() int { + return m.Size() +} +func (m *ImageFilter) XXX_DiscardUnknown() { + xxx_messageInfo_ImageFilter.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageFilter proto.InternalMessageInfo + +func (m *ImageFilter) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +type ListImagesRequest struct { + // Filter to list images. + Filter *ImageFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} } +func (*ListImagesRequest) ProtoMessage() {} +func (*ListImagesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{93} +} +func (m *ListImagesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListImagesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListImagesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListImagesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListImagesRequest.Merge(m, src) +} +func (m *ListImagesRequest) XXX_Size() int { + return m.Size() +} +func (m *ListImagesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListImagesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListImagesRequest proto.InternalMessageInfo + +func (m *ListImagesRequest) GetFilter() *ImageFilter { + if m != nil { + return m.Filter + } + return nil +} + +// Basic information about a container image. +type Image struct { + // ID of the image. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Other names by which this image is known. + RepoTags []string `protobuf:"bytes,2,rep,name=repo_tags,json=repoTags,proto3" json:"repo_tags,omitempty"` + // Digests by which this image is known. + RepoDigests []string `protobuf:"bytes,3,rep,name=repo_digests,json=repoDigests,proto3" json:"repo_digests,omitempty"` + // Size of the image in bytes. Must be > 0. + Size_ uint64 `protobuf:"varint,4,opt,name=size,proto3" json:"size,omitempty"` + // UID that will run the command(s). This is used as a default if no user is + // specified when creating the container. UID and the following user name + // are mutually exclusive. + Uid *Int64Value `protobuf:"bytes,5,opt,name=uid,proto3" json:"uid,omitempty"` + // User name that will run the command(s). This is used if UID is not set + // and no user is specified when creating container. 
+ Username string `protobuf:"bytes,6,opt,name=username,proto3" json:"username,omitempty"` + // ImageSpec for image which includes annotations + Spec *ImageSpec `protobuf:"bytes,7,opt,name=spec,proto3" json:"spec,omitempty"` + // Recommendation on whether this image should be exempt from garbage collection. + // It must only be treated as a recommendation -- the client can still request that the image be deleted, + // and the runtime must oblige. + Pinned bool `protobuf:"varint,8,opt,name=pinned,proto3" json:"pinned,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Image) Reset() { *m = Image{} } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{94} +} +func (m *Image) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Image.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Image) XXX_Merge(src proto.Message) { + xxx_messageInfo_Image.Merge(m, src) +} +func (m *Image) XXX_Size() int { + return m.Size() +} +func (m *Image) XXX_DiscardUnknown() { + xxx_messageInfo_Image.DiscardUnknown(m) +} + +var xxx_messageInfo_Image proto.InternalMessageInfo + +func (m *Image) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Image) GetRepoTags() []string { + if m != nil { + return m.RepoTags + } + return nil +} + +func (m *Image) GetRepoDigests() []string { + if m != nil { + return m.RepoDigests + } + return nil +} + +func (m *Image) GetSize_() uint64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func (m *Image) GetUid() *Int64Value { + if m != nil { + return m.Uid + } + return nil +} + +func (m *Image) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *Image) GetSpec() *ImageSpec { + if m != nil { + return m.Spec + } + return nil +} + +func (m *Image) GetPinned() bool { + if m != nil { + return m.Pinned + } + return false +} + +type ListImagesResponse struct { + // List of images. 
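// The Pinned field documented above is a recommendation only: an image
// garbage collector should skip pinned images, while an explicit removal
// request must still be honored by the runtime. A hypothetical GC helper
// (collectableImages is not part of the CRI API), assuming the
// ImageServiceClient interface generated later in this file:
func collectableImages(ctx context.Context, client ImageServiceClient) ([]*Image, error) {
    resp, err := client.ListImages(ctx, &ListImagesRequest{})
    if err != nil {
        return nil, err
    }
    var out []*Image
    for _, img := range resp.GetImages() {
        if img.GetPinned() {
            continue // recommended exemption from garbage collection
        }
        out = append(out, img)
    }
    return out, nil
}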
+ Images []*Image `protobuf:"bytes,1,rep,name=images,proto3" json:"images,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} } +func (*ListImagesResponse) ProtoMessage() {} +func (*ListImagesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{95} +} +func (m *ListImagesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListImagesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListImagesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListImagesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListImagesResponse.Merge(m, src) +} +func (m *ListImagesResponse) XXX_Size() int { + return m.Size() +} +func (m *ListImagesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListImagesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListImagesResponse proto.InternalMessageInfo + +func (m *ListImagesResponse) GetImages() []*Image { + if m != nil { + return m.Images + } + return nil +} + +type ImageStatusRequest struct { + // Spec of the image. + Image *ImageSpec `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + // Verbose indicates whether to return extra information about the image. + Verbose bool `protobuf:"varint,2,opt,name=verbose,proto3" json:"verbose,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImageStatusRequest) Reset() { *m = ImageStatusRequest{} } +func (*ImageStatusRequest) ProtoMessage() {} +func (*ImageStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{96} +} +func (m *ImageStatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ImageStatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ImageStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStatusRequest.Merge(m, src) +} +func (m *ImageStatusRequest) XXX_Size() int { + return m.Size() +} +func (m *ImageStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStatusRequest proto.InternalMessageInfo + +func (m *ImageStatusRequest) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +func (m *ImageStatusRequest) GetVerbose() bool { + if m != nil { + return m.Verbose + } + return false +} + +type ImageStatusResponse struct { + // Status of the image. + Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + // Info is extra information of the Image. The key could be arbitrary string, and + // value should be in json format. The information could include anything useful + // for debug, e.g. image config for oci image based container runtime. + // It should only be returned non-empty when Verbose is true. 
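// The Info maps on ImageStatusResponse above (and likewise on
// ContainerStatusResponse and StatusResponse) use arbitrary string keys with
// JSON-encoded values, and are populated only when Verbose is set on the
// request. Each value therefore decodes independently; a sketch assuming the
// standard encoding/json and fmt packages (decodeInfo is hypothetical):
func decodeInfo(info map[string]string) (map[string]interface{}, error) {
    out := make(map[string]interface{}, len(info))
    for key, raw := range info {
        var v interface{}
        if err := json.Unmarshal([]byte(raw), &v); err != nil {
            return nil, fmt.Errorf("info[%q]: %v", key, err)
        }
        out[key] = v
    }
    return out, nil
}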
+ Info map[string]string `protobuf:"bytes,2,rep,name=info,proto3" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImageStatusResponse) Reset() { *m = ImageStatusResponse{} } +func (*ImageStatusResponse) ProtoMessage() {} +func (*ImageStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{97} +} +func (m *ImageStatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ImageStatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ImageStatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStatusResponse.Merge(m, src) +} +func (m *ImageStatusResponse) XXX_Size() int { + return m.Size() +} +func (m *ImageStatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStatusResponse proto.InternalMessageInfo + +func (m *ImageStatusResponse) GetImage() *Image { + if m != nil { + return m.Image + } + return nil +} + +func (m *ImageStatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + +// AuthConfig contains authorization information for connecting to a registry. +type AuthConfig struct { + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + Auth string `protobuf:"bytes,3,opt,name=auth,proto3" json:"auth,omitempty"` + ServerAddress string `protobuf:"bytes,4,opt,name=server_address,json=serverAddress,proto3" json:"server_address,omitempty"` + // IdentityToken is used to authenticate the user and get + // an access token for the registry. 
+ IdentityToken string `protobuf:"bytes,5,opt,name=identity_token,json=identityToken,proto3" json:"identity_token,omitempty"` + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `protobuf:"bytes,6,opt,name=registry_token,json=registryToken,proto3" json:"registry_token,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AuthConfig) Reset() { *m = AuthConfig{} } +func (*AuthConfig) ProtoMessage() {} +func (*AuthConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{98} +} +func (m *AuthConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuthConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuthConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuthConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuthConfig.Merge(m, src) +} +func (m *AuthConfig) XXX_Size() int { + return m.Size() +} +func (m *AuthConfig) XXX_DiscardUnknown() { + xxx_messageInfo_AuthConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_AuthConfig proto.InternalMessageInfo + +func (m *AuthConfig) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *AuthConfig) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *AuthConfig) GetAuth() string { + if m != nil { + return m.Auth + } + return "" +} + +func (m *AuthConfig) GetServerAddress() string { + if m != nil { + return m.ServerAddress + } + return "" +} + +func (m *AuthConfig) GetIdentityToken() string { + if m != nil { + return m.IdentityToken + } + return "" +} + +func (m *AuthConfig) GetRegistryToken() string { + if m != nil { + return m.RegistryToken + } + return "" +} + +type PullImageRequest struct { + // Spec of the image. + Image *ImageSpec `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + // Authentication configuration for pulling the image. + Auth *AuthConfig `protobuf:"bytes,2,opt,name=auth,proto3" json:"auth,omitempty"` + // Config of the PodSandbox, which is used to pull image in PodSandbox context. 
+ SandboxConfig *PodSandboxConfig `protobuf:"bytes,3,opt,name=sandbox_config,json=sandboxConfig,proto3" json:"sandbox_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PullImageRequest) Reset() { *m = PullImageRequest{} } +func (*PullImageRequest) ProtoMessage() {} +func (*PullImageRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{99} +} +func (m *PullImageRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PullImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PullImageRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PullImageRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PullImageRequest.Merge(m, src) +} +func (m *PullImageRequest) XXX_Size() int { + return m.Size() +} +func (m *PullImageRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PullImageRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PullImageRequest proto.InternalMessageInfo + +func (m *PullImageRequest) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +func (m *PullImageRequest) GetAuth() *AuthConfig { + if m != nil { + return m.Auth + } + return nil +} + +func (m *PullImageRequest) GetSandboxConfig() *PodSandboxConfig { + if m != nil { + return m.SandboxConfig + } + return nil +} + +type PullImageResponse struct { + // Reference to the image in use. For most runtimes, this should be an + // image ID or digest. + ImageRef string `protobuf:"bytes,1,opt,name=image_ref,json=imageRef,proto3" json:"image_ref,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PullImageResponse) Reset() { *m = PullImageResponse{} } +func (*PullImageResponse) ProtoMessage() {} +func (*PullImageResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{100} +} +func (m *PullImageResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PullImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PullImageResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PullImageResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PullImageResponse.Merge(m, src) +} +func (m *PullImageResponse) XXX_Size() int { + return m.Size() +} +func (m *PullImageResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PullImageResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PullImageResponse proto.InternalMessageInfo + +func (m *PullImageResponse) GetImageRef() string { + if m != nil { + return m.ImageRef + } + return "" +} + +type RemoveImageRequest struct { + // Spec of the image to remove. 
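// A sketch of pulling a private image with the AuthConfig message above;
// typically only one credential style is set (username/password, the
// pre-encoded auth string, an identity token, or a registry bearer token).
// The registry reference is hypothetical, and ImageServiceClient is the
// interface generated later in this file:
func pullPrivateImage(ctx context.Context, client ImageServiceClient, user, pass string) (string, error) {
    resp, err := client.PullImage(ctx, &PullImageRequest{
        Image: &ImageSpec{Image: "registry.example.com/app:1.0"},
        Auth:  &AuthConfig{Username: user, Password: pass},
    })
    if err != nil {
        return "", err
    }
    return resp.GetImageRef(), nil // the image ID or digest actually in use
}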
+ Image *ImageSpec `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveImageRequest) Reset() { *m = RemoveImageRequest{} } +func (*RemoveImageRequest) ProtoMessage() {} +func (*RemoveImageRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{101} +} +func (m *RemoveImageRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveImageRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveImageRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveImageRequest.Merge(m, src) +} +func (m *RemoveImageRequest) XXX_Size() int { + return m.Size() +} +func (m *RemoveImageRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveImageRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveImageRequest proto.InternalMessageInfo + +func (m *RemoveImageRequest) GetImage() *ImageSpec { + if m != nil { + return m.Image + } + return nil +} + +type RemoveImageResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RemoveImageResponse) Reset() { *m = RemoveImageResponse{} } +func (*RemoveImageResponse) ProtoMessage() {} +func (*RemoveImageResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{102} +} +func (m *RemoveImageResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveImageResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveImageResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveImageResponse.Merge(m, src) +} +func (m *RemoveImageResponse) XXX_Size() int { + return m.Size() +} +func (m *RemoveImageResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveImageResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveImageResponse proto.InternalMessageInfo + +type NetworkConfig struct { + // CIDR to use for pod IP addresses. If the CIDR is empty, runtimes + // should omit it. 
+ PodCidr string `protobuf:"bytes,1,opt,name=pod_cidr,json=podCidr,proto3" json:"pod_cidr,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NetworkConfig) Reset() { *m = NetworkConfig{} } +func (*NetworkConfig) ProtoMessage() {} +func (*NetworkConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{103} +} +func (m *NetworkConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NetworkConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NetworkConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkConfig.Merge(m, src) +} +func (m *NetworkConfig) XXX_Size() int { + return m.Size() +} +func (m *NetworkConfig) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkConfig proto.InternalMessageInfo + +func (m *NetworkConfig) GetPodCidr() string { + if m != nil { + return m.PodCidr + } + return "" +} + +type RuntimeConfig struct { + NetworkConfig *NetworkConfig `protobuf:"bytes,1,opt,name=network_config,json=networkConfig,proto3" json:"network_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RuntimeConfig) Reset() { *m = RuntimeConfig{} } +func (*RuntimeConfig) ProtoMessage() {} +func (*RuntimeConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{104} +} +func (m *RuntimeConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RuntimeConfig.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RuntimeConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeConfig.Merge(m, src) +} +func (m *RuntimeConfig) XXX_Size() int { + return m.Size() +} +func (m *RuntimeConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeConfig proto.InternalMessageInfo + +func (m *RuntimeConfig) GetNetworkConfig() *NetworkConfig { + if m != nil { + return m.NetworkConfig + } + return nil +} + +type UpdateRuntimeConfigRequest struct { + RuntimeConfig *RuntimeConfig `protobuf:"bytes,1,opt,name=runtime_config,json=runtimeConfig,proto3" json:"runtime_config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateRuntimeConfigRequest) Reset() { *m = UpdateRuntimeConfigRequest{} } +func (*UpdateRuntimeConfigRequest) ProtoMessage() {} +func (*UpdateRuntimeConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{105} +} +func (m *UpdateRuntimeConfigRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateRuntimeConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateRuntimeConfigRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateRuntimeConfigRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_UpdateRuntimeConfigRequest.Merge(m, src) +} +func (m *UpdateRuntimeConfigRequest) XXX_Size() int { + return m.Size() +} +func (m *UpdateRuntimeConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateRuntimeConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateRuntimeConfigRequest proto.InternalMessageInfo + +func (m *UpdateRuntimeConfigRequest) GetRuntimeConfig() *RuntimeConfig { + if m != nil { + return m.RuntimeConfig + } + return nil +} + +type UpdateRuntimeConfigResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdateRuntimeConfigResponse) Reset() { *m = UpdateRuntimeConfigResponse{} } +func (*UpdateRuntimeConfigResponse) ProtoMessage() {} +func (*UpdateRuntimeConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{106} +} +func (m *UpdateRuntimeConfigResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateRuntimeConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UpdateRuntimeConfigResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UpdateRuntimeConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateRuntimeConfigResponse.Merge(m, src) +} +func (m *UpdateRuntimeConfigResponse) XXX_Size() int { + return m.Size() +} +func (m *UpdateRuntimeConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateRuntimeConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateRuntimeConfigResponse proto.InternalMessageInfo + +// RuntimeCondition contains condition information for the runtime. +// There are 2 kinds of runtime conditions: +// 1. Required conditions: Conditions are required for kubelet to work +// properly. If any required condition is unmet, the node will not be ready. +// The required conditions include: +// - RuntimeReady: RuntimeReady means the runtime is up and ready to accept +// basic containers, e.g. a container that only needs host network. +// - NetworkReady: NetworkReady means the runtime network is up and ready to +// accept containers which require container network. +// +// 2. Optional conditions: Conditions are informative to the user, but kubelet +// will not rely on them. Since condition type is an arbitrary string, all conditions +// not required are optional. These conditions will be exposed to users to help +// them understand the status of the system. +type RuntimeCondition struct { + // Type of runtime condition. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Status of the condition, one of true/false. Default: false. + Status bool `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"` + // Brief CamelCase string containing reason for the condition's last transition. + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // Human-readable message indicating details about last transition.
+ Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RuntimeCondition) Reset() { *m = RuntimeCondition{} } +func (*RuntimeCondition) ProtoMessage() {} +func (*RuntimeCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{107} +} +func (m *RuntimeCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RuntimeCondition.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RuntimeCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeCondition.Merge(m, src) +} +func (m *RuntimeCondition) XXX_Size() int { + return m.Size() +} +func (m *RuntimeCondition) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeCondition proto.InternalMessageInfo + +func (m *RuntimeCondition) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *RuntimeCondition) GetStatus() bool { + if m != nil { + return m.Status + } + return false +} + +func (m *RuntimeCondition) GetReason() string { + if m != nil { + return m.Reason + } + return "" +} + +func (m *RuntimeCondition) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// RuntimeStatus is information about the current status of the runtime. +type RuntimeStatus struct { + // List of current observed runtime conditions. + Conditions []*RuntimeCondition `protobuf:"bytes,1,rep,name=conditions,proto3" json:"conditions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RuntimeStatus) Reset() { *m = RuntimeStatus{} } +func (*RuntimeStatus) ProtoMessage() {} +func (*RuntimeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{108} +} +func (m *RuntimeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RuntimeStatus.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RuntimeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeStatus.Merge(m, src) +} +func (m *RuntimeStatus) XXX_Size() int { + return m.Size() +} +func (m *RuntimeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeStatus proto.InternalMessageInfo + +func (m *RuntimeStatus) GetConditions() []*RuntimeCondition { + if m != nil { + return m.Conditions + } + return nil +} + +type StatusRequest struct { + // Verbose indicates whether to return extra information about the runtime. 
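// Per the RuntimeCondition comment above, RuntimeReady and NetworkReady are
// the two required condition types; if either reports false the node is not
// ready, and every other condition type is informational only. A hypothetical
// readiness check over a RuntimeStatus:
func runtimeHealthy(status *RuntimeStatus) bool {
    required := map[string]bool{"RuntimeReady": false, "NetworkReady": false}
    for _, c := range status.GetConditions() {
        if _, ok := required[c.GetType()]; ok {
            required[c.GetType()] = c.GetStatus()
        }
    }
    return required["RuntimeReady"] && required["NetworkReady"]
}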
+ Verbose bool `protobuf:"varint,1,opt,name=verbose,proto3" json:"verbose,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusRequest) Reset() { *m = StatusRequest{} } +func (*StatusRequest) ProtoMessage() {} +func (*StatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{109} +} +func (m *StatusRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusRequest.Merge(m, src) +} +func (m *StatusRequest) XXX_Size() int { + return m.Size() +} +func (m *StatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusRequest proto.InternalMessageInfo + +func (m *StatusRequest) GetVerbose() bool { + if m != nil { + return m.Verbose + } + return false +} + +type StatusResponse struct { + // Status of the Runtime. + Status *RuntimeStatus `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // Info is extra information of the Runtime. The key could be arbitrary string, and + // value should be in json format. The information could include anything useful for + // debug, e.g. plugins used by the container runtime. + // It should only be returned non-empty when Verbose is true. + Info map[string]string `protobuf:"bytes,2,rep,name=info,proto3" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatusResponse) Reset() { *m = StatusResponse{} } +func (*StatusResponse) ProtoMessage() {} +func (*StatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{110} +} +func (m *StatusResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StatusResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusResponse.Merge(m, src) +} +func (m *StatusResponse) XXX_Size() int { + return m.Size() +} +func (m *StatusResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatusResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusResponse proto.InternalMessageInfo + +func (m *StatusResponse) GetStatus() *RuntimeStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *StatusResponse) GetInfo() map[string]string { + if m != nil { + return m.Info + } + return nil +} + +type ImageFsInfoRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImageFsInfoRequest) Reset() { *m = ImageFsInfoRequest{} } +func (*ImageFsInfoRequest) ProtoMessage() {} +func (*ImageFsInfoRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{111} +} +func (m *ImageFsInfoRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageFsInfoRequest) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ImageFsInfoRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ImageFsInfoRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageFsInfoRequest.Merge(m, src) +} +func (m *ImageFsInfoRequest) XXX_Size() int { + return m.Size() +} +func (m *ImageFsInfoRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ImageFsInfoRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageFsInfoRequest proto.InternalMessageInfo + +// UInt64Value is the wrapper of uint64. +type UInt64Value struct { + // The value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{112} +} +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) +} +func (m *UInt64Value) XXX_Size() int { + return m.Size() +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// FilesystemIdentifier uniquely identifies the filesystem. +type FilesystemIdentifier struct { + // Mountpoint of a filesystem. + Mountpoint string `protobuf:"bytes,1,opt,name=mountpoint,proto3" json:"mountpoint,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FilesystemIdentifier) Reset() { *m = FilesystemIdentifier{} } +func (*FilesystemIdentifier) ProtoMessage() {} +func (*FilesystemIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{113} +} +func (m *FilesystemIdentifier) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FilesystemIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FilesystemIdentifier.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FilesystemIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_FilesystemIdentifier.Merge(m, src) +} +func (m *FilesystemIdentifier) XXX_Size() int { + return m.Size() +} +func (m *FilesystemIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_FilesystemIdentifier.DiscardUnknown(m) +} + +var xxx_messageInfo_FilesystemIdentifier proto.InternalMessageInfo + +func (m *FilesystemIdentifier) GetMountpoint() string { + if m != nil { + return m.Mountpoint + } + return "" +} + +// FilesystemUsage provides the filesystem usage information. +type FilesystemUsage struct { + // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+ Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The unique identifier of the filesystem. + FsId *FilesystemIdentifier `protobuf:"bytes,2,opt,name=fs_id,json=fsId,proto3" json:"fs_id,omitempty"` + // UsedBytes represents the bytes used for images on the filesystem. + // This may differ from the total bytes used on the filesystem and may not + // equal CapacityBytes - AvailableBytes. + UsedBytes *UInt64Value `protobuf:"bytes,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` + // InodesUsed represents the inodes used by the images. + // This may not equal InodesCapacity - InodesAvailable because the underlying + // filesystem may also be used for purposes other than storing images. + InodesUsed *UInt64Value `protobuf:"bytes,4,opt,name=inodes_used,json=inodesUsed,proto3" json:"inodes_used,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FilesystemUsage) Reset() { *m = FilesystemUsage{} } +func (*FilesystemUsage) ProtoMessage() {} +func (*FilesystemUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{114} +} +func (m *FilesystemUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FilesystemUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FilesystemUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FilesystemUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_FilesystemUsage.Merge(m, src) +} +func (m *FilesystemUsage) XXX_Size() int { + return m.Size() +} +func (m *FilesystemUsage) XXX_DiscardUnknown() { + xxx_messageInfo_FilesystemUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_FilesystemUsage proto.InternalMessageInfo + +func (m *FilesystemUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *FilesystemUsage) GetFsId() *FilesystemIdentifier { + if m != nil { + return m.FsId + } + return nil +} + +func (m *FilesystemUsage) GetUsedBytes() *UInt64Value { + if m != nil { + return m.UsedBytes + } + return nil +} + +func (m *FilesystemUsage) GetInodesUsed() *UInt64Value { + if m != nil { + return m.InodesUsed + } + return nil +} + +// WindowsFilesystemUsage provides the filesystem usage information specific to Windows. +type WindowsFilesystemUsage struct { + // Timestamp in nanoseconds at which the information was collected. Must be > 0. + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The unique identifier of the filesystem. + FsId *FilesystemIdentifier `protobuf:"bytes,2,opt,name=fs_id,json=fsId,proto3" json:"fs_id,omitempty"` + // UsedBytes represents the bytes used for images on the filesystem. + // This may differ from the total bytes used on the filesystem and may not + // equal CapacityBytes - AvailableBytes.
+ UsedBytes *UInt64Value `protobuf:"bytes,3,opt,name=used_bytes,json=usedBytes,proto3" json:"used_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsFilesystemUsage) Reset() { *m = WindowsFilesystemUsage{} } +func (*WindowsFilesystemUsage) ProtoMessage() {} +func (*WindowsFilesystemUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{115} +} +func (m *WindowsFilesystemUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsFilesystemUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WindowsFilesystemUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WindowsFilesystemUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsFilesystemUsage.Merge(m, src) +} +func (m *WindowsFilesystemUsage) XXX_Size() int { + return m.Size() +} +func (m *WindowsFilesystemUsage) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsFilesystemUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsFilesystemUsage proto.InternalMessageInfo + +func (m *WindowsFilesystemUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *WindowsFilesystemUsage) GetFsId() *FilesystemIdentifier { + if m != nil { + return m.FsId + } + return nil +} + +func (m *WindowsFilesystemUsage) GetUsedBytes() *UInt64Value { + if m != nil { + return m.UsedBytes + } + return nil +} + +type ImageFsInfoResponse struct { + // Information of image filesystem(s). + ImageFilesystems []*FilesystemUsage `protobuf:"bytes,1,rep,name=image_filesystems,json=imageFilesystems,proto3" json:"image_filesystems,omitempty"` + // Information of container filesystem(s). + // This is an optional field, may be used for example if container and image + // storage are separated. + // Default will be to return this as empty. 
+ ContainerFilesystems []*FilesystemUsage `protobuf:"bytes,2,rep,name=container_filesystems,json=containerFilesystems,proto3" json:"container_filesystems,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImageFsInfoResponse) Reset() { *m = ImageFsInfoResponse{} } +func (*ImageFsInfoResponse) ProtoMessage() {} +func (*ImageFsInfoResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{116} +} +func (m *ImageFsInfoResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageFsInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ImageFsInfoResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ImageFsInfoResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageFsInfoResponse.Merge(m, src) +} +func (m *ImageFsInfoResponse) XXX_Size() int { + return m.Size() +} +func (m *ImageFsInfoResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ImageFsInfoResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageFsInfoResponse proto.InternalMessageInfo + +func (m *ImageFsInfoResponse) GetImageFilesystems() []*FilesystemUsage { + if m != nil { + return m.ImageFilesystems + } + return nil +} + +func (m *ImageFsInfoResponse) GetContainerFilesystems() []*FilesystemUsage { + if m != nil { + return m.ContainerFilesystems + } + return nil +} + +type ContainerStatsRequest struct { + // ID of the container for which to retrieve stats. + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerStatsRequest) Reset() { *m = ContainerStatsRequest{} } +func (*ContainerStatsRequest) ProtoMessage() {} +func (*ContainerStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{117} +} +func (m *ContainerStatsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerStatsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStatsRequest.Merge(m, src) +} +func (m *ContainerStatsRequest) XXX_Size() int { + return m.Size() +} +func (m *ContainerStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStatsRequest proto.InternalMessageInfo + +func (m *ContainerStatsRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +type ContainerStatsResponse struct { + // Stats of the container. 
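// UsedBytes above counts only the bytes images consume on each reported
// filesystem, which need not equal CapacityBytes - AvailableBytes, so
// aggregating usage is just a walk over the list. A sketch
// (totalImageFsBytes is hypothetical):
func totalImageFsBytes(resp *ImageFsInfoResponse) uint64 {
    var total uint64
    for _, fs := range resp.GetImageFilesystems() {
        if used := fs.GetUsedBytes(); used != nil {
            total += used.GetValue()
        }
    }
    return total
}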
+ Stats *ContainerStats `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerStatsResponse) Reset() { *m = ContainerStatsResponse{} } +func (*ContainerStatsResponse) ProtoMessage() {} +func (*ContainerStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{118} +} +func (m *ContainerStatsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerStatsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStatsResponse.Merge(m, src) +} +func (m *ContainerStatsResponse) XXX_Size() int { + return m.Size() +} +func (m *ContainerStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStatsResponse proto.InternalMessageInfo + +func (m *ContainerStatsResponse) GetStats() *ContainerStats { + if m != nil { + return m.Stats + } + return nil +} + +type ListContainerStatsRequest struct { + // Filter for the list request. + Filter *ContainerStatsFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListContainerStatsRequest) Reset() { *m = ListContainerStatsRequest{} } +func (*ListContainerStatsRequest) ProtoMessage() {} +func (*ListContainerStatsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{119} +} +func (m *ListContainerStatsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListContainerStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListContainerStatsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListContainerStatsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListContainerStatsRequest.Merge(m, src) +} +func (m *ListContainerStatsRequest) XXX_Size() int { + return m.Size() +} +func (m *ListContainerStatsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListContainerStatsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListContainerStatsRequest proto.InternalMessageInfo + +func (m *ListContainerStatsRequest) GetFilter() *ContainerStatsFilter { + if m != nil { + return m.Filter + } + return nil +} + +// ContainerStatsFilter is used to filter containers. +// All those fields are combined with 'AND' +type ContainerStatsFilter struct { + // ID of the container. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // ID of the PodSandbox. + PodSandboxId string `protobuf:"bytes,2,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + // LabelSelector to select matches. + // Only api.MatchLabels is supported for now and the requirements + // are ANDed. MatchExpressions is not supported yet. 
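+ // Hypothetical usage sketch (the ID and label values are illustrative):
+ // because the fields are ANDed, the filter below matches a container only if
+ // it has this ID and also carries the given label.
+ //
+ //    filter := &ContainerStatsFilter{
+ //        Id:            "abc123",
+ //        LabelSelector: map[string]string{"app": "web"},
+ //    }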
+ LabelSelector map[string]string `protobuf:"bytes,3,rep,name=label_selector,json=labelSelector,proto3" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerStatsFilter) Reset() { *m = ContainerStatsFilter{} } +func (*ContainerStatsFilter) ProtoMessage() {} +func (*ContainerStatsFilter) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{120} +} +func (m *ContainerStatsFilter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStatsFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerStatsFilter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerStatsFilter) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStatsFilter.Merge(m, src) +} +func (m *ContainerStatsFilter) XXX_Size() int { + return m.Size() +} +func (m *ContainerStatsFilter) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStatsFilter.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStatsFilter proto.InternalMessageInfo + +func (m *ContainerStatsFilter) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ContainerStatsFilter) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *ContainerStatsFilter) GetLabelSelector() map[string]string { + if m != nil { + return m.LabelSelector + } + return nil +} + +type ListContainerStatsResponse struct { + // Stats of the container. + Stats []*ContainerStats `protobuf:"bytes,1,rep,name=stats,proto3" json:"stats,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListContainerStatsResponse) Reset() { *m = ListContainerStatsResponse{} } +func (*ListContainerStatsResponse) ProtoMessage() {} +func (*ListContainerStatsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{121} +} +func (m *ListContainerStatsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListContainerStatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListContainerStatsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListContainerStatsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListContainerStatsResponse.Merge(m, src) +} +func (m *ListContainerStatsResponse) XXX_Size() int { + return m.Size() +} +func (m *ListContainerStatsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListContainerStatsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListContainerStatsResponse proto.InternalMessageInfo + +func (m *ListContainerStatsResponse) GetStats() []*ContainerStats { + if m != nil { + return m.Stats + } + return nil +} + +// ContainerAttributes provides basic information of the container. +type ContainerAttributes struct { + // ID of the container. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Metadata of the container. 
+ Metadata *ContainerMetadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + // Key-value pairs that may be used to scope and select individual resources. + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Unstructured key-value map holding arbitrary metadata. + // Annotations MUST NOT be altered by the runtime; the value of this field + // MUST be identical to that of the corresponding ContainerConfig used to + // instantiate the Container this status represents. + Annotations map[string]string `protobuf:"bytes,4,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerAttributes) Reset() { *m = ContainerAttributes{} } +func (*ContainerAttributes) ProtoMessage() {} +func (*ContainerAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{122} +} +func (m *ContainerAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerAttributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerAttributes.Merge(m, src) +} +func (m *ContainerAttributes) XXX_Size() int { + return m.Size() +} +func (m *ContainerAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerAttributes proto.InternalMessageInfo + +func (m *ContainerAttributes) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ContainerAttributes) GetMetadata() *ContainerMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +func (m *ContainerAttributes) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ContainerAttributes) GetAnnotations() map[string]string { + if m != nil { + return m.Annotations + } + return nil +} + +// ContainerStats provides the resource usage statistics for a container. +type ContainerStats struct { + // Information of the container. + Attributes *ContainerAttributes `protobuf:"bytes,1,opt,name=attributes,proto3" json:"attributes,omitempty"` + // CPU usage gathered from the container. + Cpu *CpuUsage `protobuf:"bytes,2,opt,name=cpu,proto3" json:"cpu,omitempty"` + // Memory usage gathered from the container. + Memory *MemoryUsage `protobuf:"bytes,3,opt,name=memory,proto3" json:"memory,omitempty"` + // Usage of the writable layer. + WritableLayer *FilesystemUsage `protobuf:"bytes,4,opt,name=writable_layer,json=writableLayer,proto3" json:"writable_layer,omitempty"` + // Swap usage gathered from the container. 
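+ // Illustrative note: the generated getters are nil-safe, so callers can
+ // chain them without intermediate nil checks, e.g. (hypothetical)
+ //
+ //    used := stats.GetSwap().GetSwapUsageBytes().GetValue()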
+ Swap *SwapUsage `protobuf:"bytes,5,opt,name=swap,proto3" json:"swap,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerStats) Reset() { *m = ContainerStats{} } +func (*ContainerStats) ProtoMessage() {} +func (*ContainerStats) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{123} +} +func (m *ContainerStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStats.Merge(m, src) +} +func (m *ContainerStats) XXX_Size() int { + return m.Size() +} +func (m *ContainerStats) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStats.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStats proto.InternalMessageInfo + +func (m *ContainerStats) GetAttributes() *ContainerAttributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *ContainerStats) GetCpu() *CpuUsage { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *ContainerStats) GetMemory() *MemoryUsage { + if m != nil { + return m.Memory + } + return nil +} + +func (m *ContainerStats) GetWritableLayer() *FilesystemUsage { + if m != nil { + return m.WritableLayer + } + return nil +} + +func (m *ContainerStats) GetSwap() *SwapUsage { + if m != nil { + return m.Swap + } + return nil +} + +// WindowsContainerStats provides the resource usage statistics for a container specific for Windows +type WindowsContainerStats struct { + // Information of the container. + Attributes *ContainerAttributes `protobuf:"bytes,1,opt,name=attributes,proto3" json:"attributes,omitempty"` + // CPU usage gathered from the container. + Cpu *WindowsCpuUsage `protobuf:"bytes,2,opt,name=cpu,proto3" json:"cpu,omitempty"` + // Memory usage gathered from the container. + Memory *WindowsMemoryUsage `protobuf:"bytes,3,opt,name=memory,proto3" json:"memory,omitempty"` + // Usage of the writable layer. 
+ WritableLayer *WindowsFilesystemUsage `protobuf:"bytes,4,opt,name=writable_layer,json=writableLayer,proto3" json:"writable_layer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsContainerStats) Reset() { *m = WindowsContainerStats{} } +func (*WindowsContainerStats) ProtoMessage() {} +func (*WindowsContainerStats) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{124} +} +func (m *WindowsContainerStats) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsContainerStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WindowsContainerStats.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WindowsContainerStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsContainerStats.Merge(m, src) +} +func (m *WindowsContainerStats) XXX_Size() int { + return m.Size() +} +func (m *WindowsContainerStats) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsContainerStats.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsContainerStats proto.InternalMessageInfo + +func (m *WindowsContainerStats) GetAttributes() *ContainerAttributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *WindowsContainerStats) GetCpu() *WindowsCpuUsage { + if m != nil { + return m.Cpu + } + return nil +} + +func (m *WindowsContainerStats) GetMemory() *WindowsMemoryUsage { + if m != nil { + return m.Memory + } + return nil +} + +func (m *WindowsContainerStats) GetWritableLayer() *WindowsFilesystemUsage { + if m != nil { + return m.WritableLayer + } + return nil +} + +// CpuUsage provides the CPU usage information. +type CpuUsage struct { + // Timestamp in nanoseconds at which the information were collected. Must be > 0. + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Cumulative CPU usage (sum across all cores) since object creation. + UsageCoreNanoSeconds *UInt64Value `protobuf:"bytes,2,opt,name=usage_core_nano_seconds,json=usageCoreNanoSeconds,proto3" json:"usage_core_nano_seconds,omitempty"` + // Total CPU usage (sum of all cores) averaged over the sample window. + // The "core" unit can be interpreted as CPU core-nanoseconds per second. 
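+ // A hedged sketch of how the two fields relate: given an earlier cumulative
+ // sample a and a later sample b, with both timestamps in nanoseconds,
+ //
+ //    usage_nano_cores ~= (b.usage_core_nano_seconds - a.usage_core_nano_seconds) * 1e9 /
+ //        (b.timestamp - a.timestamp)
+ //
+ // i.e. core-nanoseconds consumed per second of wall clock, expressed in
+ // nano-cores.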
+ UsageNanoCores *UInt64Value `protobuf:"bytes,3,opt,name=usage_nano_cores,json=usageNanoCores,proto3" json:"usage_nano_cores,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CpuUsage) Reset() { *m = CpuUsage{} } +func (*CpuUsage) ProtoMessage() {} +func (*CpuUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{125} +} +func (m *CpuUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CpuUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CpuUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CpuUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_CpuUsage.Merge(m, src) +} +func (m *CpuUsage) XXX_Size() int { + return m.Size() +} +func (m *CpuUsage) XXX_DiscardUnknown() { + xxx_messageInfo_CpuUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_CpuUsage proto.InternalMessageInfo + +func (m *CpuUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *CpuUsage) GetUsageCoreNanoSeconds() *UInt64Value { + if m != nil { + return m.UsageCoreNanoSeconds + } + return nil +} + +func (m *CpuUsage) GetUsageNanoCores() *UInt64Value { + if m != nil { + return m.UsageNanoCores + } + return nil +} + +// WindowsCpuUsage provides the CPU usage information specific to Windows +type WindowsCpuUsage struct { + // Timestamp in nanoseconds at which the information were collected. Must be > 0. + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Cumulative CPU usage (sum across all cores) since object creation. + UsageCoreNanoSeconds *UInt64Value `protobuf:"bytes,2,opt,name=usage_core_nano_seconds,json=usageCoreNanoSeconds,proto3" json:"usage_core_nano_seconds,omitempty"` + // Total CPU usage (sum of all cores) averaged over the sample window. + // The "core" unit can be interpreted as CPU core-nanoseconds per second. 
+ UsageNanoCores *UInt64Value `protobuf:"bytes,3,opt,name=usage_nano_cores,json=usageNanoCores,proto3" json:"usage_nano_cores,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsCpuUsage) Reset() { *m = WindowsCpuUsage{} } +func (*WindowsCpuUsage) ProtoMessage() {} +func (*WindowsCpuUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{126} +} +func (m *WindowsCpuUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsCpuUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WindowsCpuUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WindowsCpuUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsCpuUsage.Merge(m, src) +} +func (m *WindowsCpuUsage) XXX_Size() int { + return m.Size() +} +func (m *WindowsCpuUsage) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsCpuUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsCpuUsage proto.InternalMessageInfo + +func (m *WindowsCpuUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *WindowsCpuUsage) GetUsageCoreNanoSeconds() *UInt64Value { + if m != nil { + return m.UsageCoreNanoSeconds + } + return nil +} + +func (m *WindowsCpuUsage) GetUsageNanoCores() *UInt64Value { + if m != nil { + return m.UsageNanoCores + } + return nil +} + +// MemoryUsage provides the memory usage information. +type MemoryUsage struct { + // Timestamp in nanoseconds at which the information were collected. Must be > 0. + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The amount of working set memory in bytes. + WorkingSetBytes *UInt64Value `protobuf:"bytes,2,opt,name=working_set_bytes,json=workingSetBytes,proto3" json:"working_set_bytes,omitempty"` + // Available memory for use. This is defined as the memory limit - workingSetBytes. + AvailableBytes *UInt64Value `protobuf:"bytes,3,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` + // Total memory in use. This includes all memory regardless of when it was accessed. + UsageBytes *UInt64Value `protobuf:"bytes,4,opt,name=usage_bytes,json=usageBytes,proto3" json:"usage_bytes,omitempty"` + // The amount of anonymous and swap cache memory (includes transparent hugepages). + RssBytes *UInt64Value `protobuf:"bytes,5,opt,name=rss_bytes,json=rssBytes,proto3" json:"rss_bytes,omitempty"` + // Cumulative number of minor page faults. + PageFaults *UInt64Value `protobuf:"bytes,6,opt,name=page_faults,json=pageFaults,proto3" json:"page_faults,omitempty"` + // Cumulative number of major page faults. 
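+ // Illustrative note: like the other cumulative counters in this message, a
+ // rate can be derived from two samples a and b, e.g. (hypothetical)
+ //
+ //    perSec := float64(b.GetMajorPageFaults().GetValue()-a.GetMajorPageFaults().GetValue()) /
+ //        (float64(b.GetTimestamp()-a.GetTimestamp()) / 1e9)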
+ MajorPageFaults *UInt64Value `protobuf:"bytes,7,opt,name=major_page_faults,json=majorPageFaults,proto3" json:"major_page_faults,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MemoryUsage) Reset() { *m = MemoryUsage{} } +func (*MemoryUsage) ProtoMessage() {} +func (*MemoryUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{127} +} +func (m *MemoryUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MemoryUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MemoryUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MemoryUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_MemoryUsage.Merge(m, src) +} +func (m *MemoryUsage) XXX_Size() int { + return m.Size() +} +func (m *MemoryUsage) XXX_DiscardUnknown() { + xxx_messageInfo_MemoryUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_MemoryUsage proto.InternalMessageInfo + +func (m *MemoryUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *MemoryUsage) GetWorkingSetBytes() *UInt64Value { + if m != nil { + return m.WorkingSetBytes + } + return nil +} + +func (m *MemoryUsage) GetAvailableBytes() *UInt64Value { + if m != nil { + return m.AvailableBytes + } + return nil +} + +func (m *MemoryUsage) GetUsageBytes() *UInt64Value { + if m != nil { + return m.UsageBytes + } + return nil +} + +func (m *MemoryUsage) GetRssBytes() *UInt64Value { + if m != nil { + return m.RssBytes + } + return nil +} + +func (m *MemoryUsage) GetPageFaults() *UInt64Value { + if m != nil { + return m.PageFaults + } + return nil +} + +func (m *MemoryUsage) GetMajorPageFaults() *UInt64Value { + if m != nil { + return m.MajorPageFaults + } + return nil +} + +type SwapUsage struct { + // Timestamp in nanoseconds at which the information were collected. Must be > 0. + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Available swap for use. This is defined as the swap limit - swapUsageBytes. + SwapAvailableBytes *UInt64Value `protobuf:"bytes,2,opt,name=swap_available_bytes,json=swapAvailableBytes,proto3" json:"swap_available_bytes,omitempty"` + // Total memory in use. This includes all memory regardless of when it was accessed. 
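+ // Hedged arithmetic note: given the definition of swap_available_bytes
+ // above, the effective swap limit can be reconstructed as
+ //
+ //    limit = swap_available_bytes + swap_usage_bytes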
+ SwapUsageBytes *UInt64Value `protobuf:"bytes,3,opt,name=swap_usage_bytes,json=swapUsageBytes,proto3" json:"swap_usage_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SwapUsage) Reset() { *m = SwapUsage{} } +func (*SwapUsage) ProtoMessage() {} +func (*SwapUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{128} +} +func (m *SwapUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SwapUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SwapUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SwapUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_SwapUsage.Merge(m, src) +} +func (m *SwapUsage) XXX_Size() int { + return m.Size() +} +func (m *SwapUsage) XXX_DiscardUnknown() { + xxx_messageInfo_SwapUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_SwapUsage proto.InternalMessageInfo + +func (m *SwapUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *SwapUsage) GetSwapAvailableBytes() *UInt64Value { + if m != nil { + return m.SwapAvailableBytes + } + return nil +} + +func (m *SwapUsage) GetSwapUsageBytes() *UInt64Value { + if m != nil { + return m.SwapUsageBytes + } + return nil +} + +// WindowsMemoryUsage provides the memory usage information specific to Windows +type WindowsMemoryUsage struct { + // Timestamp in nanoseconds at which the information were collected. Must be > 0. + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The amount of working set memory in bytes. + WorkingSetBytes *UInt64Value `protobuf:"bytes,2,opt,name=working_set_bytes,json=workingSetBytes,proto3" json:"working_set_bytes,omitempty"` + // Available memory for use. This is defined as the memory limit - commit_memory_bytes. + AvailableBytes *UInt64Value `protobuf:"bytes,3,opt,name=available_bytes,json=availableBytes,proto3" json:"available_bytes,omitempty"` + // Cumulative number of page faults. + PageFaults *UInt64Value `protobuf:"bytes,4,opt,name=page_faults,json=pageFaults,proto3" json:"page_faults,omitempty"` + // Total commit memory in use. Commit memory is total of physical and virtual memory in use. 
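+ // Hedged arithmetic note: combined with available_bytes above (defined as
+ // memory limit - commit_memory_bytes), the limit follows as
+ //
+ //    limit = available_bytes + commit_memory_bytes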
+ CommitMemoryBytes *UInt64Value `protobuf:"bytes,5,opt,name=commit_memory_bytes,json=commitMemoryBytes,proto3" json:"commit_memory_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WindowsMemoryUsage) Reset() { *m = WindowsMemoryUsage{} } +func (*WindowsMemoryUsage) ProtoMessage() {} +func (*WindowsMemoryUsage) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{129} +} +func (m *WindowsMemoryUsage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsMemoryUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WindowsMemoryUsage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WindowsMemoryUsage) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsMemoryUsage.Merge(m, src) +} +func (m *WindowsMemoryUsage) XXX_Size() int { + return m.Size() +} +func (m *WindowsMemoryUsage) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsMemoryUsage.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsMemoryUsage proto.InternalMessageInfo + +func (m *WindowsMemoryUsage) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *WindowsMemoryUsage) GetWorkingSetBytes() *UInt64Value { + if m != nil { + return m.WorkingSetBytes + } + return nil +} + +func (m *WindowsMemoryUsage) GetAvailableBytes() *UInt64Value { + if m != nil { + return m.AvailableBytes + } + return nil +} + +func (m *WindowsMemoryUsage) GetPageFaults() *UInt64Value { + if m != nil { + return m.PageFaults + } + return nil +} + +func (m *WindowsMemoryUsage) GetCommitMemoryBytes() *UInt64Value { + if m != nil { + return m.CommitMemoryBytes + } + return nil +} + +type ReopenContainerLogRequest struct { + // ID of the container for which to reopen the log. 
+ ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReopenContainerLogRequest) Reset() { *m = ReopenContainerLogRequest{} } +func (*ReopenContainerLogRequest) ProtoMessage() {} +func (*ReopenContainerLogRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{130} +} +func (m *ReopenContainerLogRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReopenContainerLogRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReopenContainerLogRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReopenContainerLogRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReopenContainerLogRequest.Merge(m, src) +} +func (m *ReopenContainerLogRequest) XXX_Size() int { + return m.Size() +} +func (m *ReopenContainerLogRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReopenContainerLogRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReopenContainerLogRequest proto.InternalMessageInfo + +func (m *ReopenContainerLogRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +type ReopenContainerLogResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReopenContainerLogResponse) Reset() { *m = ReopenContainerLogResponse{} } +func (*ReopenContainerLogResponse) ProtoMessage() {} +func (*ReopenContainerLogResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{131} +} +func (m *ReopenContainerLogResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReopenContainerLogResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ReopenContainerLogResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ReopenContainerLogResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReopenContainerLogResponse.Merge(m, src) +} +func (m *ReopenContainerLogResponse) XXX_Size() int { + return m.Size() +} +func (m *ReopenContainerLogResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReopenContainerLogResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReopenContainerLogResponse proto.InternalMessageInfo + +type CheckpointContainerRequest struct { + // ID of the container to be checkpointed. + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Location of the checkpoint archive used for export + Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"` + // Timeout in seconds for the checkpoint to complete. + // Timeout of zero means to use the CRI default. + // Timeout > 0 means to use the user specified timeout. 
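+ // Hypothetical request sketch (the ID and path are illustrative): leave
+ // Timeout at zero to accept the CRI default; set it only to override.
+ //
+ //    req := &CheckpointContainerRequest{
+ //        ContainerId: "abc123",
+ //        Location:    "/var/lib/checkpoints/abc123.tar",
+ //        Timeout:     30, // seconds
+ //    }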
+ Timeout int64 `protobuf:"varint,3,opt,name=timeout,proto3" json:"timeout,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CheckpointContainerRequest) Reset() { *m = CheckpointContainerRequest{} } +func (*CheckpointContainerRequest) ProtoMessage() {} +func (*CheckpointContainerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{132} +} +func (m *CheckpointContainerRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckpointContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckpointContainerRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CheckpointContainerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckpointContainerRequest.Merge(m, src) +} +func (m *CheckpointContainerRequest) XXX_Size() int { + return m.Size() +} +func (m *CheckpointContainerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CheckpointContainerRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckpointContainerRequest proto.InternalMessageInfo + +func (m *CheckpointContainerRequest) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *CheckpointContainerRequest) GetLocation() string { + if m != nil { + return m.Location + } + return "" +} + +func (m *CheckpointContainerRequest) GetTimeout() int64 { + if m != nil { + return m.Timeout + } + return 0 +} + +type CheckpointContainerResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CheckpointContainerResponse) Reset() { *m = CheckpointContainerResponse{} } +func (*CheckpointContainerResponse) ProtoMessage() {} +func (*CheckpointContainerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{133} +} +func (m *CheckpointContainerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CheckpointContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CheckpointContainerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CheckpointContainerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CheckpointContainerResponse.Merge(m, src) +} +func (m *CheckpointContainerResponse) XXX_Size() int { + return m.Size() +} +func (m *CheckpointContainerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CheckpointContainerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CheckpointContainerResponse proto.InternalMessageInfo + +type GetEventsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetEventsRequest) Reset() { *m = GetEventsRequest{} } +func (*GetEventsRequest) ProtoMessage() {} +func (*GetEventsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{134} +} +func (m *GetEventsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetEventsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GetEventsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil + } +} +func (m *GetEventsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetEventsRequest.Merge(m, src) +} +func (m *GetEventsRequest) XXX_Size() int { + return m.Size() +} +func (m *GetEventsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetEventsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetEventsRequest proto.InternalMessageInfo + +type ContainerEventResponse struct { + // ID of the container + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + // Type of the container event + ContainerEventType ContainerEventType `protobuf:"varint,2,opt,name=container_event_type,json=containerEventType,proto3,enum=runtime.v1.ContainerEventType" json:"container_event_type,omitempty"` + // Creation timestamp of this event + CreatedAt int64 `protobuf:"varint,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // Sandbox status + PodSandboxStatus *PodSandboxStatus `protobuf:"bytes,4,opt,name=pod_sandbox_status,json=podSandboxStatus,proto3" json:"pod_sandbox_status,omitempty"` + // Container statuses + ContainersStatuses []*ContainerStatus `protobuf:"bytes,5,rep,name=containers_statuses,json=containersStatuses,proto3" json:"containers_statuses,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerEventResponse) Reset() { *m = ContainerEventResponse{} } +func (*ContainerEventResponse) ProtoMessage() {} +func (*ContainerEventResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{135} +} +func (m *ContainerEventResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerEventResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerEventResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerEventResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerEventResponse.Merge(m, src) +} +func (m *ContainerEventResponse) XXX_Size() int { + return m.Size() +} +func (m *ContainerEventResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerEventResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerEventResponse proto.InternalMessageInfo + +func (m *ContainerEventResponse) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *ContainerEventResponse) GetContainerEventType() ContainerEventType { + if m != nil { + return m.ContainerEventType + } + return ContainerEventType_CONTAINER_CREATED_EVENT +} + +func (m *ContainerEventResponse) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *ContainerEventResponse) GetPodSandboxStatus() *PodSandboxStatus { + if m != nil { + return m.PodSandboxStatus + } + return nil +} + +func (m *ContainerEventResponse) GetContainersStatuses() []*ContainerStatus { + if m != nil { + return m.ContainersStatuses + } + return nil +} + +type ListMetricDescriptorsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMetricDescriptorsRequest) Reset() { *m = ListMetricDescriptorsRequest{} } +func (*ListMetricDescriptorsRequest) ProtoMessage() {} +func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{136} +} +func (m 
*ListMetricDescriptorsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListMetricDescriptorsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListMetricDescriptorsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListMetricDescriptorsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMetricDescriptorsRequest.Merge(m, src) +} +func (m *ListMetricDescriptorsRequest) XXX_Size() int { + return m.Size() +} +func (m *ListMetricDescriptorsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListMetricDescriptorsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMetricDescriptorsRequest proto.InternalMessageInfo + +type ListMetricDescriptorsResponse struct { + Descriptors []*MetricDescriptor `protobuf:"bytes,1,rep,name=descriptors,proto3" json:"descriptors,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMetricDescriptorsResponse) Reset() { *m = ListMetricDescriptorsResponse{} } +func (*ListMetricDescriptorsResponse) ProtoMessage() {} +func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{137} +} +func (m *ListMetricDescriptorsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListMetricDescriptorsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListMetricDescriptorsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListMetricDescriptorsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMetricDescriptorsResponse.Merge(m, src) +} +func (m *ListMetricDescriptorsResponse) XXX_Size() int { + return m.Size() +} +func (m *ListMetricDescriptorsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListMetricDescriptorsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMetricDescriptorsResponse proto.InternalMessageInfo + +func (m *ListMetricDescriptorsResponse) GetDescriptors() []*MetricDescriptor { + if m != nil { + return m.Descriptors + } + return nil +} + +type MetricDescriptor struct { + // The name field will be used as a unique identifier of this MetricDescriptor, + // and be used in conjunction with the Metric structure to populate the full Metric. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"` + // When a metric uses this metric descriptor, it should only define + // labels that have previously been declared in label_keys. + // It is the responsibility of the runtime to correctly keep sorted the keys and values. + // If the two slices have different length, the behavior is undefined. 
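+ // Illustrative sketch of the contract (metric and label names are
+ // hypothetical): a Metric that references this descriptor by name must
+ // supply LabelValues matching these keys positionally.
+ //
+ //    desc := &MetricDescriptor{Name: "example_total", LabelKeys: []string{"container"}}
+ //    m := &Metric{Name: "example_total", LabelValues: []string{"web"}}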
+ LabelKeys []string `protobuf:"bytes,3,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } +func (*MetricDescriptor) ProtoMessage() {} +func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{138} +} +func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MetricDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricDescriptor.Merge(m, src) +} +func (m *MetricDescriptor) XXX_Size() int { + return m.Size() +} +func (m *MetricDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo + +func (m *MetricDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricDescriptor) GetHelp() string { + if m != nil { + return m.Help + } + return "" +} + +func (m *MetricDescriptor) GetLabelKeys() []string { + if m != nil { + return m.LabelKeys + } + return nil +} + +type ListPodSandboxMetricsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListPodSandboxMetricsRequest) Reset() { *m = ListPodSandboxMetricsRequest{} } +func (*ListPodSandboxMetricsRequest) ProtoMessage() {} +func (*ListPodSandboxMetricsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{139} +} +func (m *ListPodSandboxMetricsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListPodSandboxMetricsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListPodSandboxMetricsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListPodSandboxMetricsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListPodSandboxMetricsRequest.Merge(m, src) +} +func (m *ListPodSandboxMetricsRequest) XXX_Size() int { + return m.Size() +} +func (m *ListPodSandboxMetricsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListPodSandboxMetricsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListPodSandboxMetricsRequest proto.InternalMessageInfo + +type ListPodSandboxMetricsResponse struct { + PodMetrics []*PodSandboxMetrics `protobuf:"bytes,1,rep,name=pod_metrics,json=podMetrics,proto3" json:"pod_metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListPodSandboxMetricsResponse) Reset() { *m = ListPodSandboxMetricsResponse{} } +func (*ListPodSandboxMetricsResponse) ProtoMessage() {} +func (*ListPodSandboxMetricsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{140} +} +func (m *ListPodSandboxMetricsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListPodSandboxMetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListPodSandboxMetricsResponse.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListPodSandboxMetricsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListPodSandboxMetricsResponse.Merge(m, src) +} +func (m *ListPodSandboxMetricsResponse) XXX_Size() int { + return m.Size() +} +func (m *ListPodSandboxMetricsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListPodSandboxMetricsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListPodSandboxMetricsResponse proto.InternalMessageInfo + +func (m *ListPodSandboxMetricsResponse) GetPodMetrics() []*PodSandboxMetrics { + if m != nil { + return m.PodMetrics + } + return nil +} + +type PodSandboxMetrics struct { + PodSandboxId string `protobuf:"bytes,1,opt,name=pod_sandbox_id,json=podSandboxId,proto3" json:"pod_sandbox_id,omitempty"` + Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` + ContainerMetrics []*ContainerMetrics `protobuf:"bytes,3,rep,name=container_metrics,json=containerMetrics,proto3" json:"container_metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PodSandboxMetrics) Reset() { *m = PodSandboxMetrics{} } +func (*PodSandboxMetrics) ProtoMessage() {} +func (*PodSandboxMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{141} +} +func (m *PodSandboxMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSandboxMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PodSandboxMetrics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PodSandboxMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSandboxMetrics.Merge(m, src) +} +func (m *PodSandboxMetrics) XXX_Size() int { + return m.Size() +} +func (m *PodSandboxMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_PodSandboxMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSandboxMetrics proto.InternalMessageInfo + +func (m *PodSandboxMetrics) GetPodSandboxId() string { + if m != nil { + return m.PodSandboxId + } + return "" +} + +func (m *PodSandboxMetrics) GetMetrics() []*Metric { + if m != nil { + return m.Metrics + } + return nil +} + +func (m *PodSandboxMetrics) GetContainerMetrics() []*ContainerMetrics { + if m != nil { + return m.ContainerMetrics + } + return nil +} + +type ContainerMetrics struct { + ContainerId string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ContainerMetrics) Reset() { *m = ContainerMetrics{} } +func (*ContainerMetrics) ProtoMessage() {} +func (*ContainerMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{142} +} +func (m *ContainerMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ContainerMetrics.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ContainerMetrics) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ContainerMetrics.Merge(m, src) +} +func (m *ContainerMetrics) XXX_Size() int { + return m.Size() +} +func (m *ContainerMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerMetrics proto.InternalMessageInfo + +func (m *ContainerMetrics) GetContainerId() string { + if m != nil { + return m.ContainerId + } + return "" +} + +func (m *ContainerMetrics) GetMetrics() []*Metric { + if m != nil { + return m.Metrics + } + return nil +} + +type Metric struct { + // Name must match a name previously returned in a MetricDescriptors call, + // otherwise, it will be ignored. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Timestamp should be 0 if the metric was gathered live. + // If it was cached, the Timestamp should reflect the time it was collected. + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + MetricType MetricType `protobuf:"varint,3,opt,name=metric_type,json=metricType,proto3,enum=runtime.v1.MetricType" json:"metric_type,omitempty"` + // The corresponding LabelValues to the LabelKeys defined in the MetricDescriptor. + // It is the responsibility of the runtime to correctly keep sorted the keys and values. + // If the two slices have different length, the behavior is undefined. + LabelValues []string `protobuf:"bytes,4,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + Value *UInt64Value `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{143} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return m.Size() +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Metric) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *Metric) GetMetricType() MetricType { + if m != nil { + return m.MetricType + } + return MetricType_COUNTER +} + +func (m *Metric) GetLabelValues() []string { + if m != nil { + return m.LabelValues + } + return nil +} + +func (m *Metric) GetValue() *UInt64Value { + if m != nil { + return m.Value + } + return nil +} + +type RuntimeConfigRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RuntimeConfigRequest) Reset() { *m = RuntimeConfigRequest{} } +func (*RuntimeConfigRequest) ProtoMessage() {} +func (*RuntimeConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{144} +} +func (m *RuntimeConfigRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if 
deterministic { + return xxx_messageInfo_RuntimeConfigRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RuntimeConfigRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeConfigRequest.Merge(m, src) +} +func (m *RuntimeConfigRequest) XXX_Size() int { + return m.Size() +} +func (m *RuntimeConfigRequest) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeConfigRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeConfigRequest proto.InternalMessageInfo + +type RuntimeConfigResponse struct { + // Configuration information for Linux-based runtimes. This field contains + // global runtime configuration options that are not specific to runtime + // handlers. + Linux *LinuxRuntimeConfiguration `protobuf:"bytes,1,opt,name=linux,proto3" json:"linux,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RuntimeConfigResponse) Reset() { *m = RuntimeConfigResponse{} } +func (*RuntimeConfigResponse) ProtoMessage() {} +func (*RuntimeConfigResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{145} +} +func (m *RuntimeConfigResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RuntimeConfigResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RuntimeConfigResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeConfigResponse.Merge(m, src) +} +func (m *RuntimeConfigResponse) XXX_Size() int { + return m.Size() +} +func (m *RuntimeConfigResponse) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeConfigResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeConfigResponse proto.InternalMessageInfo + +func (m *RuntimeConfigResponse) GetLinux() *LinuxRuntimeConfiguration { + if m != nil { + return m.Linux + } + return nil +} + +type LinuxRuntimeConfiguration struct { + // Cgroup driver to use + // Note: this field should not change for the lifecycle of the Kubelet, + // or while there are running containers. + // The Kubelet will not re-request this after startup, and will construct the cgroup + // hierarchy assuming it is static. + // If the runtime wishes to change this value, it must be accompanied by removal of + // all pods, and a restart of the Kubelet. The easiest way to do this is with a full node reboot. 
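+ // Hypothetical caller-side sketch (client being some RuntimeService client):
+ // since the value is assumed static for the Kubelet's lifetime, fetch it
+ // once at startup and cache it, e.g.
+ //
+ //    resp, err := client.RuntimeConfig(ctx, &RuntimeConfigRequest{})
+ //    if err == nil {
+ //        driver = resp.GetLinux().GetCgroupDriver() // e.g. SYSTEMD
+ //    }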
+	CgroupDriver         CgroupDriver `protobuf:"varint,1,opt,name=cgroup_driver,json=cgroupDriver,proto3,enum=runtime.v1.CgroupDriver" json:"cgroup_driver,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *LinuxRuntimeConfiguration) Reset() { *m = LinuxRuntimeConfiguration{} }
+func (*LinuxRuntimeConfiguration) ProtoMessage() {}
+func (*LinuxRuntimeConfiguration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_00212fb1f9d3bf1c, []int{146}
+}
+func (m *LinuxRuntimeConfiguration) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LinuxRuntimeConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LinuxRuntimeConfiguration.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LinuxRuntimeConfiguration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LinuxRuntimeConfiguration.Merge(m, src)
+}
+func (m *LinuxRuntimeConfiguration) XXX_Size() int {
+	return m.Size()
+}
+func (m *LinuxRuntimeConfiguration) XXX_DiscardUnknown() {
+	xxx_messageInfo_LinuxRuntimeConfiguration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LinuxRuntimeConfiguration proto.InternalMessageInfo
+
+func (m *LinuxRuntimeConfiguration) GetCgroupDriver() CgroupDriver {
+	if m != nil {
+		return m.CgroupDriver
+	}
+	return CgroupDriver_SYSTEMD
+}
+
+func init() {
+	proto.RegisterEnum("runtime.v1.Protocol", Protocol_name, Protocol_value)
+	proto.RegisterEnum("runtime.v1.MountPropagation", MountPropagation_name, MountPropagation_value)
+	proto.RegisterEnum("runtime.v1.NamespaceMode", NamespaceMode_name, NamespaceMode_value)
+	proto.RegisterEnum("runtime.v1.PodSandboxState", PodSandboxState_name, PodSandboxState_value)
+	proto.RegisterEnum("runtime.v1.ContainerState", ContainerState_name, ContainerState_value)
+	proto.RegisterEnum("runtime.v1.ContainerEventType", ContainerEventType_name, ContainerEventType_value)
+	proto.RegisterEnum("runtime.v1.MetricType", MetricType_name, MetricType_value)
+	proto.RegisterEnum("runtime.v1.CgroupDriver", CgroupDriver_name, CgroupDriver_value)
+	proto.RegisterEnum("runtime.v1.SecurityProfile_ProfileType", SecurityProfile_ProfileType_name, SecurityProfile_ProfileType_value)
+	proto.RegisterType((*VersionRequest)(nil), "runtime.v1.VersionRequest")
+	proto.RegisterType((*VersionResponse)(nil), "runtime.v1.VersionResponse")
+	proto.RegisterType((*DNSConfig)(nil), "runtime.v1.DNSConfig")
+	proto.RegisterType((*PortMapping)(nil), "runtime.v1.PortMapping")
+	proto.RegisterType((*Mount)(nil), "runtime.v1.Mount")
+	proto.RegisterType((*IDMapping)(nil), "runtime.v1.IDMapping")
+	proto.RegisterType((*UserNamespace)(nil), "runtime.v1.UserNamespace")
+	proto.RegisterType((*NamespaceOption)(nil), "runtime.v1.NamespaceOption")
+	proto.RegisterType((*Int64Value)(nil), "runtime.v1.Int64Value")
+	proto.RegisterType((*LinuxSandboxSecurityContext)(nil), "runtime.v1.LinuxSandboxSecurityContext")
+	proto.RegisterType((*SecurityProfile)(nil), "runtime.v1.SecurityProfile")
+	proto.RegisterType((*LinuxPodSandboxConfig)(nil), "runtime.v1.LinuxPodSandboxConfig")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.LinuxPodSandboxConfig.SysctlsEntry")
+	proto.RegisterType((*PodSandboxMetadata)(nil), "runtime.v1.PodSandboxMetadata")
+	proto.RegisterType((*PodSandboxConfig)(nil), "runtime.v1.PodSandboxConfig")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.PodSandboxConfig.AnnotationsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.PodSandboxConfig.LabelsEntry")
+	proto.RegisterType((*RunPodSandboxRequest)(nil), "runtime.v1.RunPodSandboxRequest")
+	proto.RegisterType((*RunPodSandboxResponse)(nil), "runtime.v1.RunPodSandboxResponse")
+	proto.RegisterType((*StopPodSandboxRequest)(nil), "runtime.v1.StopPodSandboxRequest")
+	proto.RegisterType((*StopPodSandboxResponse)(nil), "runtime.v1.StopPodSandboxResponse")
+	proto.RegisterType((*RemovePodSandboxRequest)(nil), "runtime.v1.RemovePodSandboxRequest")
+	proto.RegisterType((*RemovePodSandboxResponse)(nil), "runtime.v1.RemovePodSandboxResponse")
+	proto.RegisterType((*PodSandboxStatusRequest)(nil), "runtime.v1.PodSandboxStatusRequest")
+	proto.RegisterType((*PodIP)(nil), "runtime.v1.PodIP")
+	proto.RegisterType((*PodSandboxNetworkStatus)(nil), "runtime.v1.PodSandboxNetworkStatus")
+	proto.RegisterType((*Namespace)(nil), "runtime.v1.Namespace")
+	proto.RegisterType((*LinuxPodSandboxStatus)(nil), "runtime.v1.LinuxPodSandboxStatus")
+	proto.RegisterType((*PodSandboxStatus)(nil), "runtime.v1.PodSandboxStatus")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.PodSandboxStatus.AnnotationsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.PodSandboxStatus.LabelsEntry")
+	proto.RegisterType((*PodSandboxStatusResponse)(nil), "runtime.v1.PodSandboxStatusResponse")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.PodSandboxStatusResponse.InfoEntry")
+	proto.RegisterType((*PodSandboxStateValue)(nil), "runtime.v1.PodSandboxStateValue")
+	proto.RegisterType((*PodSandboxFilter)(nil), "runtime.v1.PodSandboxFilter")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.PodSandboxFilter.LabelSelectorEntry")
+	proto.RegisterType((*ListPodSandboxRequest)(nil), "runtime.v1.ListPodSandboxRequest")
+	proto.RegisterType((*PodSandbox)(nil), "runtime.v1.PodSandbox")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.PodSandbox.AnnotationsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.PodSandbox.LabelsEntry")
+	proto.RegisterType((*ListPodSandboxResponse)(nil), "runtime.v1.ListPodSandboxResponse")
+	proto.RegisterType((*PodSandboxStatsRequest)(nil), "runtime.v1.PodSandboxStatsRequest")
+	proto.RegisterType((*PodSandboxStatsResponse)(nil), "runtime.v1.PodSandboxStatsResponse")
+	proto.RegisterType((*PodSandboxStatsFilter)(nil), "runtime.v1.PodSandboxStatsFilter")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.PodSandboxStatsFilter.LabelSelectorEntry")
+	proto.RegisterType((*ListPodSandboxStatsRequest)(nil), "runtime.v1.ListPodSandboxStatsRequest")
+	proto.RegisterType((*ListPodSandboxStatsResponse)(nil), "runtime.v1.ListPodSandboxStatsResponse")
+	proto.RegisterType((*PodSandboxAttributes)(nil), "runtime.v1.PodSandboxAttributes")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.PodSandboxAttributes.AnnotationsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.PodSandboxAttributes.LabelsEntry")
+	proto.RegisterType((*PodSandboxStats)(nil), "runtime.v1.PodSandboxStats")
+	proto.RegisterType((*LinuxPodSandboxStats)(nil), "runtime.v1.LinuxPodSandboxStats")
+	proto.RegisterType((*WindowsPodSandboxStats)(nil), "runtime.v1.WindowsPodSandboxStats")
+	proto.RegisterType((*NetworkUsage)(nil), "runtime.v1.NetworkUsage")
+	proto.RegisterType((*WindowsNetworkUsage)(nil), "runtime.v1.WindowsNetworkUsage")
+	proto.RegisterType((*NetworkInterfaceUsage)(nil), "runtime.v1.NetworkInterfaceUsage")
+	proto.RegisterType((*WindowsNetworkInterfaceUsage)(nil), "runtime.v1.WindowsNetworkInterfaceUsage")
+	proto.RegisterType((*ProcessUsage)(nil), "runtime.v1.ProcessUsage")
+	proto.RegisterType((*WindowsProcessUsage)(nil), "runtime.v1.WindowsProcessUsage")
+	proto.RegisterType((*ImageSpec)(nil), "runtime.v1.ImageSpec")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.ImageSpec.AnnotationsEntry")
+	proto.RegisterType((*KeyValue)(nil), "runtime.v1.KeyValue")
+	proto.RegisterType((*LinuxContainerResources)(nil), "runtime.v1.LinuxContainerResources")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.LinuxContainerResources.UnifiedEntry")
+	proto.RegisterType((*HugepageLimit)(nil), "runtime.v1.HugepageLimit")
+	proto.RegisterType((*SELinuxOption)(nil), "runtime.v1.SELinuxOption")
+	proto.RegisterType((*Capability)(nil), "runtime.v1.Capability")
+	proto.RegisterType((*LinuxContainerSecurityContext)(nil), "runtime.v1.LinuxContainerSecurityContext")
+	proto.RegisterType((*LinuxContainerConfig)(nil), "runtime.v1.LinuxContainerConfig")
+	proto.RegisterType((*WindowsNamespaceOption)(nil), "runtime.v1.WindowsNamespaceOption")
+	proto.RegisterType((*WindowsSandboxSecurityContext)(nil), "runtime.v1.WindowsSandboxSecurityContext")
+	proto.RegisterType((*WindowsPodSandboxConfig)(nil), "runtime.v1.WindowsPodSandboxConfig")
+	proto.RegisterType((*WindowsContainerSecurityContext)(nil), "runtime.v1.WindowsContainerSecurityContext")
+	proto.RegisterType((*WindowsContainerConfig)(nil), "runtime.v1.WindowsContainerConfig")
+	proto.RegisterType((*WindowsContainerResources)(nil), "runtime.v1.WindowsContainerResources")
+	proto.RegisterType((*ContainerMetadata)(nil), "runtime.v1.ContainerMetadata")
+	proto.RegisterType((*Device)(nil), "runtime.v1.Device")
+	proto.RegisterType((*CDIDevice)(nil), "runtime.v1.CDIDevice")
+	proto.RegisterType((*ContainerConfig)(nil), "runtime.v1.ContainerConfig")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.ContainerConfig.AnnotationsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.ContainerConfig.LabelsEntry")
+	proto.RegisterType((*CreateContainerRequest)(nil), "runtime.v1.CreateContainerRequest")
+	proto.RegisterType((*CreateContainerResponse)(nil), "runtime.v1.CreateContainerResponse")
+	proto.RegisterType((*StartContainerRequest)(nil), "runtime.v1.StartContainerRequest")
+	proto.RegisterType((*StartContainerResponse)(nil), "runtime.v1.StartContainerResponse")
+	proto.RegisterType((*StopContainerRequest)(nil), "runtime.v1.StopContainerRequest")
+	proto.RegisterType((*StopContainerResponse)(nil), "runtime.v1.StopContainerResponse")
+	proto.RegisterType((*RemoveContainerRequest)(nil), "runtime.v1.RemoveContainerRequest")
+	proto.RegisterType((*RemoveContainerResponse)(nil), "runtime.v1.RemoveContainerResponse")
+	proto.RegisterType((*ContainerStateValue)(nil), "runtime.v1.ContainerStateValue")
+	proto.RegisterType((*ContainerFilter)(nil), "runtime.v1.ContainerFilter")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.ContainerFilter.LabelSelectorEntry")
+	proto.RegisterType((*ListContainersRequest)(nil), "runtime.v1.ListContainersRequest")
+	proto.RegisterType((*Container)(nil), "runtime.v1.Container")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.Container.AnnotationsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.Container.LabelsEntry")
+	proto.RegisterType((*ListContainersResponse)(nil), "runtime.v1.ListContainersResponse")
+	proto.RegisterType((*ContainerStatusRequest)(nil), "runtime.v1.ContainerStatusRequest")
+	proto.RegisterType((*ContainerStatus)(nil), "runtime.v1.ContainerStatus")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.ContainerStatus.AnnotationsEntry")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.ContainerStatus.LabelsEntry")
+	proto.RegisterType((*ContainerStatusResponse)(nil), "runtime.v1.ContainerStatusResponse")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.ContainerStatusResponse.InfoEntry")
+	proto.RegisterType((*ContainerResources)(nil), "runtime.v1.ContainerResources")
+	proto.RegisterType((*UpdateContainerResourcesRequest)(nil), "runtime.v1.UpdateContainerResourcesRequest")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.UpdateContainerResourcesRequest.AnnotationsEntry")
+	proto.RegisterType((*UpdateContainerResourcesResponse)(nil), "runtime.v1.UpdateContainerResourcesResponse")
+	proto.RegisterType((*ExecSyncRequest)(nil), "runtime.v1.ExecSyncRequest")
+	proto.RegisterType((*ExecSyncResponse)(nil), "runtime.v1.ExecSyncResponse")
+	proto.RegisterType((*ExecRequest)(nil), "runtime.v1.ExecRequest")
+	proto.RegisterType((*ExecResponse)(nil), "runtime.v1.ExecResponse")
+	proto.RegisterType((*AttachRequest)(nil), "runtime.v1.AttachRequest")
+	proto.RegisterType((*AttachResponse)(nil), "runtime.v1.AttachResponse")
+	proto.RegisterType((*PortForwardRequest)(nil), "runtime.v1.PortForwardRequest")
+	proto.RegisterType((*PortForwardResponse)(nil), "runtime.v1.PortForwardResponse")
+	proto.RegisterType((*ImageFilter)(nil), "runtime.v1.ImageFilter")
+	proto.RegisterType((*ListImagesRequest)(nil), "runtime.v1.ListImagesRequest")
+	proto.RegisterType((*Image)(nil), "runtime.v1.Image")
+	proto.RegisterType((*ListImagesResponse)(nil), "runtime.v1.ListImagesResponse")
+	proto.RegisterType((*ImageStatusRequest)(nil), "runtime.v1.ImageStatusRequest")
+	proto.RegisterType((*ImageStatusResponse)(nil), "runtime.v1.ImageStatusResponse")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.ImageStatusResponse.InfoEntry")
+	proto.RegisterType((*AuthConfig)(nil), "runtime.v1.AuthConfig")
+	proto.RegisterType((*PullImageRequest)(nil), "runtime.v1.PullImageRequest")
+	proto.RegisterType((*PullImageResponse)(nil), "runtime.v1.PullImageResponse")
+	proto.RegisterType((*RemoveImageRequest)(nil), "runtime.v1.RemoveImageRequest")
+	proto.RegisterType((*RemoveImageResponse)(nil), "runtime.v1.RemoveImageResponse")
+	proto.RegisterType((*NetworkConfig)(nil), "runtime.v1.NetworkConfig")
+	proto.RegisterType((*RuntimeConfig)(nil), "runtime.v1.RuntimeConfig")
+	proto.RegisterType((*UpdateRuntimeConfigRequest)(nil), "runtime.v1.UpdateRuntimeConfigRequest")
+	proto.RegisterType((*UpdateRuntimeConfigResponse)(nil), "runtime.v1.UpdateRuntimeConfigResponse")
+	proto.RegisterType((*RuntimeCondition)(nil), "runtime.v1.RuntimeCondition")
+	proto.RegisterType((*RuntimeStatus)(nil), "runtime.v1.RuntimeStatus")
+	proto.RegisterType((*StatusRequest)(nil), "runtime.v1.StatusRequest")
+	proto.RegisterType((*StatusResponse)(nil), "runtime.v1.StatusResponse")
+	proto.RegisterMapType((map[string]string)(nil), "runtime.v1.StatusResponse.InfoEntry")
+	proto.RegisterType((*ImageFsInfoRequest)(nil), "runtime.v1.ImageFsInfoRequest")
+	proto.RegisterType((*UInt64Value)(nil), "runtime.v1.UInt64Value")
+	proto.RegisterType((*FilesystemIdentifier)(nil), "runtime.v1.FilesystemIdentifier")
+	proto.RegisterType((*FilesystemUsage)(nil), "runtime.v1.FilesystemUsage")
+
proto.RegisterType((*WindowsFilesystemUsage)(nil), "runtime.v1.WindowsFilesystemUsage") + proto.RegisterType((*ImageFsInfoResponse)(nil), "runtime.v1.ImageFsInfoResponse") + proto.RegisterType((*ContainerStatsRequest)(nil), "runtime.v1.ContainerStatsRequest") + proto.RegisterType((*ContainerStatsResponse)(nil), "runtime.v1.ContainerStatsResponse") + proto.RegisterType((*ListContainerStatsRequest)(nil), "runtime.v1.ListContainerStatsRequest") + proto.RegisterType((*ContainerStatsFilter)(nil), "runtime.v1.ContainerStatsFilter") + proto.RegisterMapType((map[string]string)(nil), "runtime.v1.ContainerStatsFilter.LabelSelectorEntry") + proto.RegisterType((*ListContainerStatsResponse)(nil), "runtime.v1.ListContainerStatsResponse") + proto.RegisterType((*ContainerAttributes)(nil), "runtime.v1.ContainerAttributes") + proto.RegisterMapType((map[string]string)(nil), "runtime.v1.ContainerAttributes.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "runtime.v1.ContainerAttributes.LabelsEntry") + proto.RegisterType((*ContainerStats)(nil), "runtime.v1.ContainerStats") + proto.RegisterType((*WindowsContainerStats)(nil), "runtime.v1.WindowsContainerStats") + proto.RegisterType((*CpuUsage)(nil), "runtime.v1.CpuUsage") + proto.RegisterType((*WindowsCpuUsage)(nil), "runtime.v1.WindowsCpuUsage") + proto.RegisterType((*MemoryUsage)(nil), "runtime.v1.MemoryUsage") + proto.RegisterType((*SwapUsage)(nil), "runtime.v1.SwapUsage") + proto.RegisterType((*WindowsMemoryUsage)(nil), "runtime.v1.WindowsMemoryUsage") + proto.RegisterType((*ReopenContainerLogRequest)(nil), "runtime.v1.ReopenContainerLogRequest") + proto.RegisterType((*ReopenContainerLogResponse)(nil), "runtime.v1.ReopenContainerLogResponse") + proto.RegisterType((*CheckpointContainerRequest)(nil), "runtime.v1.CheckpointContainerRequest") + proto.RegisterType((*CheckpointContainerResponse)(nil), "runtime.v1.CheckpointContainerResponse") + proto.RegisterType((*GetEventsRequest)(nil), "runtime.v1.GetEventsRequest") + proto.RegisterType((*ContainerEventResponse)(nil), "runtime.v1.ContainerEventResponse") + proto.RegisterType((*ListMetricDescriptorsRequest)(nil), "runtime.v1.ListMetricDescriptorsRequest") + proto.RegisterType((*ListMetricDescriptorsResponse)(nil), "runtime.v1.ListMetricDescriptorsResponse") + proto.RegisterType((*MetricDescriptor)(nil), "runtime.v1.MetricDescriptor") + proto.RegisterType((*ListPodSandboxMetricsRequest)(nil), "runtime.v1.ListPodSandboxMetricsRequest") + proto.RegisterType((*ListPodSandboxMetricsResponse)(nil), "runtime.v1.ListPodSandboxMetricsResponse") + proto.RegisterType((*PodSandboxMetrics)(nil), "runtime.v1.PodSandboxMetrics") + proto.RegisterType((*ContainerMetrics)(nil), "runtime.v1.ContainerMetrics") + proto.RegisterType((*Metric)(nil), "runtime.v1.Metric") + proto.RegisterType((*RuntimeConfigRequest)(nil), "runtime.v1.RuntimeConfigRequest") + proto.RegisterType((*RuntimeConfigResponse)(nil), "runtime.v1.RuntimeConfigResponse") + proto.RegisterType((*LinuxRuntimeConfiguration)(nil), "runtime.v1.LinuxRuntimeConfiguration") +} + +func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } + +var fileDescriptor_00212fb1f9d3bf1c = []byte{ + // 6821 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x7d, 0x4d, 0x6c, 0x1c, 0xc9, + 0x75, 0x30, 0x7b, 0x66, 0x48, 0xce, 0xbc, 0xe1, 0x90, 0xc3, 0x12, 0x45, 0x52, 0xa3, 0xff, 0xde, + 0x3f, 0x49, 0xbb, 0xfa, 0x59, 0xed, 0x9f, 0x24, 0xef, 0x8f, 0x46, 0x24, 0x57, 0x3b, 0x6b, 0x89, + 0x1c, 
0xf7, 0x90, 0x6b, 0xef, 0xfa, 0x83, 0xfb, 0x6b, 0x4d, 0x17, 0xc9, 0x5e, 0xcd, 0x74, 0xb7, + 0xbb, 0x7b, 0x24, 0xd1, 0xa7, 0x9c, 0x82, 0xc4, 0x27, 0x03, 0x89, 0x63, 0xc4, 0x08, 0x12, 0xe4, + 0x10, 0x24, 0xb7, 0xfc, 0x00, 0x49, 0x1c, 0xe4, 0x0f, 0x30, 0x12, 0xc3, 0x09, 0x10, 0x20, 0x87, + 0x04, 0xf0, 0x21, 0x40, 0xec, 0x4d, 0x80, 0x00, 0x39, 0xfb, 0x90, 0x53, 0x1c, 0xd4, 0x5f, 0x77, + 0x57, 0xff, 0xcd, 0x90, 0xbb, 0xde, 0x5d, 0x9f, 0x38, 0xfd, 0xea, 0xbd, 0x57, 0xaf, 0x5e, 0xbd, + 0x7a, 0xf5, 0xaa, 0xea, 0x55, 0x11, 0x6a, 0x86, 0x6b, 0x5d, 0x71, 0x3d, 0x27, 0x70, 0x10, 0x78, + 0x23, 0x3b, 0xb0, 0x86, 0xf8, 0xca, 0xa3, 0x17, 0x5b, 0x97, 0xf7, 0xac, 0x60, 0x7f, 0xf4, 0xe0, + 0x4a, 0xdf, 0x19, 0x5e, 0xdd, 0x73, 0xf6, 0x9c, 0xab, 0x14, 0xe5, 0xc1, 0x68, 0x97, 0x7e, 0xd1, + 0x0f, 0xfa, 0x8b, 0x91, 0xaa, 0x97, 0x60, 0xfe, 0x3d, 0xec, 0xf9, 0x96, 0x63, 0x6b, 0xf8, 0xeb, + 0x23, 0xec, 0x07, 0x68, 0x15, 0x66, 0x1f, 0x31, 0xc8, 0xaa, 0x72, 0x4e, 0xb9, 0x50, 0xd3, 0xc4, + 0xa7, 0xfa, 0xfb, 0x0a, 0x2c, 0x84, 0xc8, 0xbe, 0xeb, 0xd8, 0x3e, 0xce, 0xc7, 0x46, 0xe7, 0x61, + 0x8e, 0x8b, 0xa5, 0xdb, 0xc6, 0x10, 0xaf, 0x96, 0x68, 0x71, 0x9d, 0xc3, 0x36, 0x8d, 0x21, 0x46, + 0xcf, 0xc1, 0x82, 0x40, 0x11, 0x4c, 0xca, 0x14, 0x6b, 0x9e, 0x83, 0x79, 0x6d, 0xe8, 0x0a, 0x1c, + 0x13, 0x88, 0x86, 0x6b, 0x85, 0xc8, 0x15, 0x8a, 0xbc, 0xc8, 0x8b, 0xda, 0xae, 0xc5, 0xf1, 0xd5, + 0xaf, 0x42, 0x6d, 0x7d, 0xb3, 0xb7, 0xe6, 0xd8, 0xbb, 0xd6, 0x1e, 0x11, 0xd1, 0xc7, 0x1e, 0xa1, + 0x59, 0x55, 0xce, 0x95, 0x89, 0x88, 0xfc, 0x13, 0xb5, 0xa0, 0xea, 0x63, 0xc3, 0xeb, 0xef, 0x63, + 0x7f, 0xb5, 0x44, 0x8b, 0xc2, 0x6f, 0x42, 0xe5, 0xb8, 0x81, 0xe5, 0xd8, 0xfe, 0x6a, 0x99, 0x51, + 0xf1, 0x4f, 0xf5, 0xb7, 0x14, 0xa8, 0x77, 0x1d, 0x2f, 0xb8, 0x6f, 0xb8, 0xae, 0x65, 0xef, 0xa1, + 0x6b, 0x50, 0xa5, 0xba, 0xec, 0x3b, 0x03, 0xaa, 0x83, 0xf9, 0xeb, 0x4b, 0x57, 0xa2, 0x0e, 0xb9, + 0xd2, 0xe5, 0x65, 0x5a, 0x88, 0x85, 0x9e, 0x81, 0xf9, 0xbe, 0x63, 0x07, 0x86, 0x65, 0x63, 0x4f, + 0x77, 0x1d, 0x2f, 0xa0, 0xca, 0x99, 0xd6, 0x1a, 0x21, 0x94, 0xf0, 0x47, 0x27, 0xa1, 0xb6, 0xef, + 0xf8, 0x01, 0xc3, 0x28, 0x53, 0x8c, 0x2a, 0x01, 0xd0, 0xc2, 0x15, 0x98, 0xa5, 0x85, 0x96, 0xcb, + 0xd5, 0x30, 0x43, 0x3e, 0x3b, 0xae, 0xfa, 0xfd, 0x12, 0x4c, 0xdf, 0x77, 0x46, 0x76, 0x90, 0xa8, + 0xc6, 0x08, 0xf6, 0x79, 0x17, 0xc5, 0xaa, 0x31, 0x82, 0xfd, 0xa8, 0x1a, 0x82, 0xc1, 0x7a, 0x89, + 0x55, 0x43, 0x0a, 0x5b, 0x50, 0xf5, 0xb0, 0x61, 0x3a, 0xf6, 0xe0, 0x80, 0x8a, 0x50, 0xd5, 0xc2, + 0x6f, 0xd2, 0x7d, 0x3e, 0x1e, 0x58, 0xf6, 0xe8, 0x89, 0xee, 0xe1, 0x81, 0xf1, 0x00, 0x0f, 0xa8, + 0x28, 0x55, 0x6d, 0x9e, 0x83, 0x35, 0x06, 0x45, 0x6f, 0x42, 0xdd, 0xf5, 0x1c, 0xd7, 0xd8, 0x33, + 0x88, 0x06, 0x57, 0xa7, 0xa9, 0x92, 0x4e, 0xc5, 0x95, 0x44, 0x05, 0xee, 0x46, 0x38, 0x5a, 0x9c, + 0x00, 0xbd, 0x06, 0xf5, 0x91, 0x65, 0x72, 0x7d, 0xfb, 0xab, 0x33, 0xe7, 0xca, 0x17, 0xea, 0xd7, + 0x8f, 0xc7, 0xe9, 0x3b, 0xeb, 0xbc, 0x54, 0x8b, 0x63, 0x12, 0xc2, 0xbd, 0x18, 0xe1, 0x6c, 0x21, + 0x61, 0x0c, 0x53, 0xd5, 0xa1, 0x16, 0x96, 0x44, 0xaa, 0x36, 0xa9, 0x02, 0x1b, 0x5c, 0xd5, 0x26, + 0x31, 0xf1, 0x48, 0xc1, 0x96, 0x49, 0x95, 0xd7, 0xd0, 0xea, 0x21, 0xac, 0x63, 0xa2, 0x65, 0x98, + 0x19, 0x60, 0x7b, 0x2f, 0xd8, 0xa7, 0xda, 0x6b, 0x68, 0xfc, 0x4b, 0xfd, 0x75, 0x05, 0x1a, 0x3b, + 0x3e, 0xf6, 0xc8, 0x38, 0xf0, 0x5d, 0xa3, 0x8f, 0xd1, 0x65, 0xa8, 0x0c, 0x1d, 0x13, 0x73, 0x13, + 0x3a, 0x11, 0x17, 0x32, 0x44, 0xba, 0xef, 0x98, 0x58, 0xa3, 0x68, 0xe8, 0x22, 0x54, 0x46, 0x96, + 0xc9, 0xec, 0x36, 0xb7, 0x4d, 0x14, 0x85, 0xa0, 0xee, 0x11, 0xd4, 0x72, 0x21, 0x2a, 0x41, 0x51, + 0x7f, 0xa6, 0xc0, 0x42, 0x58, 
0xdb, 0x16, 0x35, 0x78, 0xf4, 0x12, 0xcc, 0xda, 0x38, 0x78, 0xec, + 0x78, 0x0f, 0xc7, 0xcb, 0x26, 0x30, 0xd1, 0xf3, 0x50, 0x76, 0xb9, 0x46, 0x0a, 0x09, 0x08, 0x16, + 0x41, 0xb6, 0xdc, 0x3e, 0xd5, 0x50, 0x31, 0xb2, 0xe5, 0xf6, 0x89, 0xb9, 0x06, 0x86, 0xb7, 0x87, + 0x69, 0x7f, 0x30, 0xd3, 0xaf, 0x32, 0x40, 0xc7, 0x44, 0xb7, 0x61, 0x7e, 0xe4, 0x63, 0xcf, 0xf6, + 0x75, 0x31, 0x78, 0x89, 0xb1, 0xd5, 0x65, 0xa6, 0x92, 0xde, 0xb5, 0x06, 0x23, 0xd8, 0xe2, 0xa3, + 0x5b, 0x05, 0xe8, 0xd8, 0xc1, 0xab, 0x2f, 0xbf, 0x67, 0x0c, 0x46, 0x18, 0x2d, 0xc1, 0xf4, 0x23, + 0xf2, 0x83, 0xb6, 0xbc, 0xac, 0xb1, 0x0f, 0xf5, 0xaf, 0x2b, 0x70, 0xf2, 0x1e, 0x31, 0xf0, 0x9e, + 0x61, 0x9b, 0x0f, 0x9c, 0x27, 0x3d, 0xdc, 0x1f, 0x79, 0x56, 0x70, 0xb0, 0xe6, 0xd8, 0x01, 0x7e, + 0x12, 0xa0, 0x77, 0x60, 0xd1, 0x16, 0xfc, 0x43, 0x41, 0x14, 0x2a, 0xc8, 0xc9, 0xcc, 0xd6, 0xb1, + 0xca, 0xb5, 0xa6, 0x2d, 0x03, 0x7c, 0x74, 0x27, 0x1a, 0x62, 0x82, 0x4f, 0x29, 0xdd, 0xa0, 0xde, + 0x06, 0x95, 0x86, 0x73, 0x11, 0xa3, 0x4f, 0xf0, 0x78, 0x15, 0x88, 0xd3, 0xd5, 0x0d, 0x5f, 0x27, + 0x2d, 0xa5, 0x5a, 0xae, 0x5f, 0x5f, 0x96, 0xac, 0x20, 0x6c, 0xb0, 0x56, 0xf3, 0x46, 0x76, 0xdb, + 0x27, 0x1a, 0x42, 0x37, 0xa8, 0x03, 0x27, 0x74, 0x7b, 0x9e, 0x33, 0x72, 0x57, 0xab, 0x85, 0x84, + 0x40, 0x09, 0xef, 0x12, 0x4c, 0xea, 0xd7, 0xb9, 0x93, 0xd0, 0x3d, 0xc7, 0x09, 0x76, 0x7d, 0xe1, + 0x18, 0x04, 0x58, 0xa3, 0x50, 0x74, 0x15, 0x8e, 0xf9, 0x23, 0xd7, 0x1d, 0xe0, 0x21, 0xb6, 0x03, + 0x63, 0xc0, 0x2a, 0x22, 0x7d, 0x56, 0xbe, 0x50, 0xd6, 0x50, 0xbc, 0x88, 0x32, 0xf6, 0xd1, 0x19, + 0x00, 0xd7, 0xb3, 0x1e, 0x59, 0x03, 0xbc, 0x87, 0xcd, 0xd5, 0x19, 0xca, 0x34, 0x06, 0x41, 0xaf, + 0x10, 0x5f, 0xdf, 0xef, 0x3b, 0x43, 0x77, 0xb5, 0x96, 0xd6, 0xb7, 0xe8, 0xa7, 0xae, 0xe7, 0xec, + 0x5a, 0x03, 0xac, 0x09, 0x5c, 0xf4, 0x1a, 0x54, 0x0d, 0xd7, 0x35, 0xbc, 0xa1, 0xe3, 0xad, 0xc2, + 0x78, 0xba, 0x10, 0x19, 0xbd, 0x0c, 0x4b, 0x9c, 0x87, 0xee, 0xb2, 0x42, 0xe6, 0x46, 0x67, 0x89, + 0x5d, 0xde, 0x29, 0xad, 0x2a, 0x1a, 0xe2, 0xe5, 0x9c, 0x96, 0x38, 0x55, 0xf5, 0xef, 0x14, 0x58, + 0x48, 0xf0, 0x44, 0xef, 0xc2, 0x9c, 0xe0, 0x10, 0x1c, 0xb8, 0xc2, 0x0d, 0x3c, 0x57, 0x20, 0xc6, + 0x15, 0xfe, 0x77, 0xfb, 0xc0, 0xc5, 0xd4, 0x5f, 0x8a, 0x0f, 0xf4, 0x14, 0x34, 0x06, 0x4e, 0xdf, + 0x18, 0x50, 0xaf, 0xe5, 0xe1, 0x5d, 0xee, 0xd5, 0xe7, 0x42, 0xa0, 0x86, 0x77, 0xd5, 0xdb, 0x50, + 0x8f, 0x31, 0x40, 0x08, 0xe6, 0x35, 0x56, 0xd5, 0x3a, 0xde, 0x35, 0x46, 0x83, 0xa0, 0x39, 0x85, + 0xe6, 0x01, 0x76, 0xec, 0x3e, 0x99, 0x45, 0x6d, 0x6c, 0x36, 0x15, 0xd4, 0x80, 0xda, 0x3d, 0xc1, + 0xa2, 0x59, 0x52, 0xbf, 0x5b, 0x86, 0xe3, 0xd4, 0xf0, 0xba, 0x8e, 0xc9, 0x47, 0x02, 0x9f, 0x72, + 0x9f, 0x82, 0x46, 0x9f, 0xf6, 0xa5, 0xee, 0x1a, 0x1e, 0xb6, 0x03, 0x3e, 0xf1, 0xcc, 0x31, 0x60, + 0x97, 0xc2, 0x90, 0x06, 0x4d, 0x9f, 0xb7, 0x48, 0xef, 0xb3, 0x91, 0xc3, 0x8d, 0x5b, 0x6a, 0x75, + 0xc1, 0x40, 0xd3, 0x16, 0xfc, 0xd4, 0xc8, 0x9b, 0xf5, 0x0f, 0xfc, 0x7e, 0x30, 0x10, 0xde, 0xee, + 0x4a, 0x8a, 0x55, 0x52, 0xd8, 0x2b, 0x3d, 0x46, 0xb0, 0x61, 0x07, 0xde, 0x81, 0x26, 0xc8, 0xd1, + 0x5b, 0x50, 0x75, 0x1e, 0x61, 0x6f, 0x1f, 0x1b, 0xcc, 0xcb, 0xd4, 0xaf, 0x3f, 0x95, 0x62, 0xb5, + 0x26, 0x1c, 0xbd, 0x86, 0x7d, 0x67, 0xe4, 0xf5, 0xb1, 0xaf, 0x85, 0x44, 0xa8, 0x0d, 0x35, 0x4f, + 0x80, 0xb9, 0x17, 0x9a, 0x88, 0x43, 0x44, 0xd5, 0xba, 0x05, 0x73, 0x71, 0xe1, 0x50, 0x13, 0xca, + 0x0f, 0xf1, 0x01, 0x57, 0x26, 0xf9, 0x19, 0xf9, 0x27, 0xd6, 0xc3, 0xec, 0xe3, 0x56, 0xe9, 0x86, + 0xa2, 0x7a, 0x80, 0xa2, 0x96, 0xde, 0xc7, 0x81, 0x61, 0x1a, 0x81, 0x81, 0x10, 0x54, 0x68, 0x30, + 0xc6, 0x58, 0xd0, 0xdf, 0x84, 0xeb, 0x88, 0xbb, 0xea, 
0x9a, 0x46, 0x7e, 0xa2, 0x53, 0x50, 0x0b, + 0x3d, 0x11, 0x8f, 0xc8, 0x22, 0x00, 0x89, 0x8c, 0x8c, 0x20, 0xc0, 0x43, 0x37, 0xa0, 0x8a, 0x69, + 0x68, 0xe2, 0x53, 0xfd, 0xd5, 0x69, 0x68, 0xa6, 0x6c, 0xe1, 0x16, 0x54, 0x87, 0xbc, 0x7a, 0xee, + 0x03, 0xcf, 0x48, 0xe1, 0x51, 0x4a, 0x48, 0x2d, 0xc4, 0x27, 0xd1, 0x07, 0xb1, 0xb5, 0x58, 0xfc, + 0x18, 0x7e, 0x33, 0x23, 0xdf, 0xd3, 0x4d, 0xcb, 0xc3, 0xfd, 0xc0, 0xf1, 0x0e, 0xb8, 0xa0, 0x73, + 0x03, 0x67, 0x6f, 0x5d, 0xc0, 0xd0, 0xcb, 0x00, 0xa6, 0xed, 0xeb, 0xd4, 0x86, 0xf7, 0x78, 0x3f, + 0x4a, 0x13, 0x60, 0x18, 0x26, 0x6a, 0x35, 0xd3, 0xf6, 0xb9, 0xc8, 0xaf, 0x43, 0x83, 0xc4, 0x5c, + 0xfa, 0x50, 0x04, 0x0e, 0xd3, 0xd4, 0x96, 0x56, 0x64, 0xb9, 0xc3, 0x08, 0x50, 0x9b, 0x73, 0xa3, + 0x0f, 0x1f, 0xdd, 0x86, 0x19, 0x1a, 0xf6, 0x88, 0x40, 0xe5, 0x42, 0x76, 0x73, 0xb9, 0xf5, 0xdd, + 0xa3, 0xa8, 0xcc, 0xf8, 0x38, 0x1d, 0xda, 0x82, 0xba, 0x61, 0xdb, 0x4e, 0x60, 0x30, 0x8f, 0xcf, + 0xc2, 0x96, 0xcb, 0x85, 0x6c, 0xda, 0x11, 0x3e, 0xe3, 0x15, 0xe7, 0x80, 0x5e, 0x83, 0x69, 0x3a, + 0x25, 0x70, 0x1f, 0x7e, 0x7e, 0xec, 0xa0, 0xd0, 0x18, 0x3e, 0x7a, 0x03, 0x66, 0x1f, 0x5b, 0xb6, + 0xe9, 0x3c, 0xf6, 0xb9, 0x3f, 0x95, 0x4c, 0xf8, 0xcb, 0xac, 0x28, 0x45, 0x2c, 0x68, 0x5a, 0x37, + 0xa1, 0x1e, 0x6b, 0xdf, 0x61, 0xec, 0xb7, 0xf5, 0x26, 0x34, 0x93, 0x6d, 0x3a, 0x94, 0xfd, 0x8f, + 0x60, 0x49, 0x1b, 0xd9, 0x91, 0x68, 0x62, 0x79, 0xf3, 0x32, 0xcc, 0x70, 0x6b, 0x60, 0xc6, 0x78, + 0xaa, 0x48, 0xad, 0x1a, 0xc7, 0x8d, 0xaf, 0x54, 0xf6, 0x0d, 0xdb, 0x1c, 0x60, 0x8f, 0xd7, 0x28, + 0x56, 0x2a, 0xef, 0x30, 0xa8, 0xfa, 0x06, 0x1c, 0x4f, 0x54, 0xcb, 0x17, 0x4a, 0x4f, 0xc3, 0xbc, + 0xeb, 0x98, 0xba, 0xcf, 0xc0, 0x22, 0x96, 0xac, 0x11, 0xdb, 0x11, 0xb8, 0x1d, 0x93, 0x90, 0xf7, + 0x02, 0xc7, 0x4d, 0x8b, 0x3d, 0x19, 0xf9, 0x2a, 0x2c, 0x27, 0xc9, 0x59, 0xf5, 0xea, 0x5b, 0xb0, + 0xa2, 0xe1, 0xa1, 0xf3, 0x08, 0x1f, 0x95, 0x75, 0x0b, 0x56, 0xd3, 0x0c, 0x38, 0xf3, 0xf7, 0x61, + 0x25, 0x82, 0xf6, 0x02, 0x23, 0x18, 0xf9, 0x87, 0x62, 0xce, 0x57, 0x91, 0x0f, 0x1c, 0x9f, 0x75, + 0x64, 0x55, 0x13, 0x9f, 0xea, 0x0a, 0x4c, 0x77, 0x1d, 0xb3, 0xd3, 0x45, 0xf3, 0x50, 0xb2, 0x5c, + 0x4e, 0x5c, 0xb2, 0x5c, 0xb5, 0x1f, 0xaf, 0x73, 0x93, 0x45, 0x9d, 0xac, 0xea, 0x24, 0x2a, 0xba, + 0x01, 0xf3, 0x86, 0x69, 0x5a, 0xc4, 0x90, 0x8c, 0x81, 0x6e, 0xb9, 0x22, 0x68, 0x5e, 0x4c, 0x74, + 0x7d, 0xa7, 0xab, 0x35, 0x22, 0xc4, 0x8e, 0xeb, 0xab, 0x77, 0xa0, 0x16, 0x05, 0xe8, 0xaf, 0x44, + 0x2b, 0xc2, 0xd2, 0xf8, 0x58, 0x2e, 0x5c, 0x2e, 0x6e, 0xa6, 0x26, 0x49, 0x2e, 0xe6, 0x2b, 0x00, + 0xa1, 0x53, 0x15, 0xe1, 0xe1, 0xf1, 0x4c, 0x96, 0x5a, 0x0c, 0x51, 0xfd, 0xf7, 0x4a, 0xdc, 0xc9, + 0xc6, 0x9a, 0x6c, 0x86, 0x4d, 0x36, 0x25, 0xa7, 0x5b, 0x3a, 0xa4, 0xd3, 0x7d, 0x11, 0xa6, 0xfd, + 0xc0, 0x08, 0x30, 0x8f, 0xc7, 0x4f, 0x66, 0x13, 0x92, 0x8a, 0xb1, 0xc6, 0x30, 0xd1, 0x69, 0x80, + 0xbe, 0x87, 0x8d, 0x00, 0x9b, 0xba, 0xc1, 0x66, 0x85, 0xb2, 0x56, 0xe3, 0x90, 0x76, 0x40, 0xbc, + 0x88, 0x58, 0x41, 0x64, 0x4c, 0x84, 0x39, 0xdd, 0x18, 0xad, 0x25, 0x42, 0xef, 0x35, 0x33, 0xd6, + 0x7b, 0x71, 0x52, 0xee, 0xbd, 0x22, 0x4f, 0x3c, 0x5b, 0xe4, 0x89, 0x19, 0xd1, 0x24, 0x9e, 0xb8, + 0x5a, 0xe4, 0x89, 0x39, 0x9b, 0x62, 0x4f, 0x9c, 0xe1, 0x48, 0x6a, 0x59, 0x8e, 0xe4, 0xb3, 0x74, + 0x9d, 0x7f, 0x51, 0x82, 0xd5, 0xf4, 0x78, 0xe6, 0x7e, 0xec, 0x65, 0x98, 0xf1, 0x29, 0xa4, 0xd8, + 0x7f, 0x72, 0x2a, 0x8e, 0x8b, 0xee, 0x40, 0xc5, 0xb2, 0x77, 0x1d, 0x3e, 0xf0, 0xae, 0x14, 0xd2, + 0xf0, 0x9a, 0xae, 0x74, 0xec, 0x5d, 0x87, 0x69, 0x90, 0xd2, 0xa2, 0x7b, 0x70, 0x2c, 0x5c, 0x59, + 0xfb, 0x3a, 0x63, 0x8c, 0x45, 0x9c, 0x27, 0x59, 0x69, 0x18, 0x55, 0x71, 0x8e, 
0x28, 0xa2, 0xeb, + 0x71, 0x32, 0x12, 0xe3, 0x10, 0x74, 0x3f, 0x30, 0x86, 0xae, 0xb0, 0xd8, 0x10, 0xd0, 0x7a, 0x0d, + 0x6a, 0x61, 0xf5, 0x87, 0xd2, 0x5d, 0x07, 0x96, 0x12, 0x63, 0x84, 0x2d, 0x24, 0xc3, 0x41, 0xa5, + 0x4c, 0x3a, 0xa8, 0xd4, 0x9f, 0x2a, 0xf1, 0x81, 0xfe, 0xb6, 0x35, 0x08, 0xb0, 0x97, 0x1a, 0xe8, + 0xaf, 0x0a, 0xbe, 0x6c, 0x94, 0x9f, 0x2b, 0xe0, 0xcb, 0xd6, 0x69, 0x7c, 0xc4, 0xbe, 0x07, 0xf3, + 0xd4, 0xc4, 0x75, 0x1f, 0x0f, 0x68, 0xac, 0xc4, 0xf5, 0x78, 0x35, 0x9b, 0x01, 0xab, 0x9d, 0x0d, + 0x91, 0x1e, 0xa7, 0x60, 0x7d, 0xd3, 0x18, 0xc4, 0x61, 0xad, 0xdb, 0x80, 0xd2, 0x48, 0x87, 0xd2, + 0xe0, 0x7d, 0xe2, 0x2f, 0xfd, 0x20, 0x73, 0xe6, 0xde, 0xa5, 0x62, 0x14, 0x5b, 0x1e, 0x13, 0x55, + 0xe3, 0xb8, 0xea, 0xbf, 0x96, 0x01, 0xa2, 0xc2, 0xcf, 0xb9, 0xa3, 0xbc, 0x15, 0x3a, 0x2c, 0x16, + 0x71, 0xaa, 0xd9, 0x2c, 0x33, 0x5d, 0x55, 0x47, 0x76, 0x55, 0x2c, 0xf6, 0x7c, 0x2e, 0x87, 0xc1, + 0xa1, 0x9d, 0xd4, 0xec, 0xe7, 0xcd, 0x49, 0xbd, 0x0d, 0xcb, 0x49, 0x33, 0xe1, 0x1e, 0xea, 0x05, + 0x98, 0xb6, 0x02, 0x3c, 0x64, 0xbb, 0xbd, 0x89, 0x0d, 0x8b, 0x18, 0x3a, 0x43, 0x52, 0xdf, 0x84, + 0x65, 0xb9, 0xaf, 0x0e, 0x17, 0xba, 0xa8, 0xf7, 0x92, 0xb1, 0x4f, 0xe4, 0x2a, 0xb9, 0x7d, 0x64, + 0x6e, 0xfd, 0x24, 0x69, 0x18, 0xa6, 0xfa, 0x03, 0x05, 0x8e, 0x27, 0x8a, 0x72, 0x06, 0xfe, 0x57, + 0x53, 0x03, 0x98, 0xf9, 0xd6, 0x97, 0x0b, 0x6a, 0xf9, 0x14, 0x47, 0xf1, 0x97, 0xa1, 0x25, 0x77, + 0x8f, 0xa4, 0xda, 0x9b, 0x89, 0xa1, 0x7c, 0x7e, 0xac, 0xd0, 0xe1, 0x78, 0xee, 0xc2, 0xc9, 0x4c, + 0xc6, 0x69, 0x9d, 0x97, 0x27, 0xd4, 0xf9, 0xff, 0x94, 0xe2, 0x3e, 0xbb, 0x1d, 0x04, 0x9e, 0xf5, + 0x60, 0x14, 0xe0, 0x4f, 0x36, 0xa8, 0x5a, 0x0f, 0x47, 0x36, 0xf3, 0xb3, 0x2f, 0x64, 0x53, 0x46, + 0xb5, 0x67, 0x8e, 0xf1, 0x9e, 0x3c, 0xc6, 0x2b, 0x94, 0xd5, 0x8b, 0x63, 0x59, 0x15, 0x8e, 0xf6, + 0xcf, 0x72, 0x10, 0xff, 0x83, 0x02, 0x0b, 0x89, 0x5e, 0x41, 0xb7, 0x01, 0x8c, 0x50, 0x74, 0x6e, + 0x1f, 0xe7, 0xc6, 0x35, 0x51, 0x8b, 0xd1, 0x90, 0x39, 0x91, 0xc5, 0x8b, 0x19, 0x73, 0x62, 0x46, + 0xbc, 0x18, 0x86, 0x8b, 0xaf, 0x47, 0x8b, 0x5d, 0xb6, 0x49, 0xaa, 0x16, 0x2e, 0x76, 0x19, 0xad, + 0x20, 0x51, 0x7f, 0xad, 0x04, 0x4b, 0x59, 0xdc, 0xd1, 0xb3, 0x50, 0xee, 0xbb, 0x23, 0xde, 0x12, + 0xe9, 0x68, 0x68, 0xcd, 0x1d, 0xed, 0xf8, 0xc6, 0x1e, 0xd6, 0x08, 0x02, 0xba, 0x0a, 0x33, 0x43, + 0x3c, 0x74, 0xbc, 0x03, 0x2e, 0xb7, 0xb4, 0xdd, 0x70, 0x9f, 0x96, 0x30, 0x6c, 0x8e, 0x86, 0xae, + 0x47, 0x61, 0x35, 0x93, 0x77, 0x55, 0x5a, 0x3d, 0xb0, 0x22, 0x46, 0x12, 0xc6, 0xd2, 0xd7, 0x61, + 0xd6, 0xf5, 0x9c, 0x3e, 0xf6, 0x7d, 0xbe, 0x1b, 0xb2, 0x9a, 0x38, 0xab, 0x22, 0x45, 0x9c, 0x86, + 0x23, 0xa2, 0x5b, 0x00, 0x51, 0x00, 0xc5, 0x67, 0xa6, 0x56, 0x6e, 0xbc, 0xe5, 0x6b, 0x31, 0x6c, + 0xf5, 0x7b, 0x25, 0x58, 0xce, 0xd6, 0x1c, 0xba, 0x1c, 0xd7, 0xcb, 0xc9, 0x0c, 0x55, 0xcb, 0xea, + 0x79, 0x35, 0xa1, 0x9e, 0x33, 0x19, 0x14, 0x59, 0x5a, 0xba, 0x99, 0xd4, 0xd2, 0xd9, 0x0c, 0xc2, + 0x6c, 0x65, 0xdd, 0x4c, 0x2a, 0x2b, 0x8b, 0x34, 0x5b, 0x67, 0xed, 0x0c, 0x9d, 0x9d, 0xcf, 0x6a, + 0x63, 0xbe, 0xea, 0xfe, 0x56, 0x81, 0xb9, 0xb8, 0x5c, 0x72, 0xc8, 0xaa, 0x24, 0x42, 0x56, 0xb4, + 0x09, 0x8b, 0x26, 0xdb, 0xb9, 0xd5, 0x2d, 0x3b, 0xc0, 0xde, 0xae, 0xd1, 0x17, 0x51, 0xe1, 0xf9, + 0x0c, 0xbb, 0xe8, 0x08, 0x1c, 0x26, 0x78, 0x93, 0xd3, 0x86, 0x60, 0xd2, 0x82, 0x90, 0x8f, 0xf0, + 0x5a, 0x13, 0x30, 0x8a, 0x11, 0xa9, 0xff, 0xa2, 0xc0, 0xb1, 0x0c, 0x05, 0x8f, 0x69, 0xc8, 0x4e, + 0x7e, 0x43, 0x2e, 0xe4, 0x77, 0xdd, 0xd8, 0xf6, 0xbc, 0x93, 0xd1, 0x9e, 0xc9, 0xf9, 0xc5, 0x9b, + 0xf5, 0x33, 0x05, 0x8e, 0x67, 0x62, 0x65, 0x6e, 0xaf, 0x5e, 0x87, 0xaa, 0xf7, 0x44, 0x7f, 0x70, + 0x10, 
0x60, 0x3f, 0x6b, 0x60, 0xef, 0xc4, 0xce, 0x50, 0x66, 0xbd, 0x27, 0x77, 0x08, 0x1e, 0x7a, + 0x19, 0x6a, 0xde, 0x13, 0x1d, 0x7b, 0x9e, 0xe3, 0x09, 0x5f, 0x94, 0x4b, 0x54, 0xf5, 0x9e, 0x6c, + 0x50, 0x44, 0x52, 0x53, 0x20, 0x6a, 0xaa, 0x8c, 0xa9, 0x29, 0x88, 0x6a, 0x0a, 0xc2, 0x9a, 0xa6, + 0xc7, 0xd4, 0x14, 0xf0, 0x9a, 0xd4, 0x3f, 0x28, 0xc1, 0xa9, 0x22, 0x75, 0x7d, 0x62, 0x8a, 0xd8, + 0x00, 0xe4, 0x3d, 0xd1, 0x5d, 0xa3, 0xff, 0x10, 0x07, 0xbe, 0x6e, 0x7a, 0x8e, 0xeb, 0x62, 0x73, + 0x9c, 0x46, 0x9a, 0xde, 0x93, 0x2e, 0xa3, 0x58, 0x67, 0x04, 0x47, 0xd2, 0xcc, 0x06, 0xa0, 0x20, + 0x5d, 0xf5, 0x18, 0x15, 0x35, 0x83, 0x44, 0xd5, 0xea, 0x87, 0x30, 0x17, 0xf7, 0x10, 0x63, 0x6c, + 0xff, 0x75, 0x68, 0x70, 0x0f, 0xa2, 0xf7, 0x9d, 0x91, 0x1d, 0x8c, 0x53, 0xd4, 0x1c, 0xc7, 0x5e, + 0x23, 0xc8, 0xea, 0xd7, 0xc3, 0xe1, 0xf6, 0xa9, 0x55, 0xf9, 0xcb, 0x25, 0xa8, 0x75, 0x86, 0xc6, + 0x1e, 0xee, 0xb9, 0xb8, 0x4f, 0x66, 0x7a, 0x8b, 0x7c, 0xf0, 0x7e, 0x67, 0x1f, 0xe8, 0x1d, 0x39, + 0x6a, 0x61, 0x71, 0xea, 0xb3, 0xd2, 0x39, 0xa2, 0xe0, 0x30, 0x66, 0x61, 0x72, 0x0d, 0x96, 0x46, + 0x3e, 0xf6, 0x74, 0xdf, 0xc5, 0x7d, 0x6b, 0xd7, 0xc2, 0xa6, 0xce, 0xaa, 0x43, 0xb4, 0x3a, 0x44, + 0xca, 0x7a, 0xa2, 0x88, 0xf2, 0xcc, 0x5a, 0xca, 0x1c, 0xcb, 0x5c, 0xca, 0x7c, 0xdc, 0x50, 0xe6, + 0x3a, 0x54, 0xbf, 0x88, 0x0f, 0xd8, 0x62, 0x7f, 0x42, 0x3a, 0xf5, 0xdb, 0x15, 0x58, 0xc9, 0x39, + 0x06, 0xa2, 0x2b, 0x45, 0x77, 0xa4, 0xbb, 0xd8, 0xb3, 0x1c, 0x53, 0xf4, 0x5a, 0xdf, 0x1d, 0x75, + 0x29, 0x00, 0x9d, 0x04, 0xf2, 0xa1, 0x7f, 0x7d, 0xe4, 0xf0, 0x60, 0xb4, 0xac, 0x55, 0xfb, 0xee, + 0xe8, 0x4b, 0xe4, 0x5b, 0xd0, 0xfa, 0xfb, 0x86, 0x87, 0x99, 0xff, 0x60, 0xb4, 0x3d, 0x0a, 0x40, + 0x2f, 0xc2, 0x71, 0x36, 0x37, 0xea, 0x03, 0x6b, 0x68, 0x11, 0x2f, 0x1b, 0x1b, 0x1a, 0x65, 0x0d, + 0xb1, 0xc2, 0x7b, 0xa4, 0xac, 0x63, 0xb3, 0xc1, 0xa0, 0x42, 0xc3, 0x71, 0x86, 0xba, 0xdf, 0x77, + 0x3c, 0xac, 0x1b, 0xe6, 0x87, 0x74, 0x1c, 0x94, 0xb5, 0xba, 0xe3, 0x0c, 0x7b, 0x04, 0xd6, 0x36, + 0x3f, 0x44, 0x67, 0xa1, 0xde, 0x77, 0x47, 0x3e, 0x0e, 0x74, 0xf2, 0x87, 0x6e, 0xd6, 0xd5, 0x34, + 0x60, 0xa0, 0x35, 0x77, 0xe4, 0xc7, 0x10, 0x86, 0x64, 0x79, 0x36, 0x1b, 0x47, 0xb8, 0x8f, 0x87, + 0xf4, 0xb4, 0x7b, 0x7f, 0xb4, 0x87, 0x5d, 0x63, 0x0f, 0x33, 0xd1, 0xc4, 0x8e, 0x9b, 0x74, 0xda, + 0xfd, 0x0e, 0x47, 0xa1, 0x02, 0x6a, 0xf3, 0xfb, 0xf1, 0x4f, 0x1f, 0xbd, 0x0b, 0xb3, 0x23, 0x9b, + 0x1a, 0xc0, 0x6a, 0x8d, 0xd2, 0x5e, 0x9b, 0xe0, 0xd0, 0xed, 0xca, 0x0e, 0x23, 0xe1, 0x67, 0x80, + 0x9c, 0x01, 0xba, 0x05, 0x2d, 0xae, 0x28, 0xff, 0xb1, 0xe1, 0x26, 0xb5, 0x05, 0x54, 0x05, 0xcb, + 0x0c, 0xa3, 0xf7, 0xd8, 0x70, 0xe3, 0x1a, 0x6b, 0xdd, 0x82, 0xb9, 0x38, 0xd3, 0x43, 0xd9, 0xd2, + 0x1d, 0x68, 0x48, 0x8d, 0x24, 0xbd, 0x4d, 0x95, 0xe2, 0x5b, 0xdf, 0x10, 0x63, 0xab, 0x4a, 0x00, + 0x3d, 0xeb, 0x1b, 0x34, 0x47, 0x81, 0x4a, 0x46, 0xf9, 0x54, 0x34, 0xf6, 0xa1, 0x1a, 0xd0, 0x90, + 0xd2, 0x02, 0x88, 0x4b, 0xa6, 0xe7, 0xff, 0xdc, 0x25, 0x93, 0xdf, 0x04, 0xe6, 0x39, 0x03, 0x21, + 0x01, 0xfd, 0x4d, 0x60, 0xf4, 0x00, 0x9a, 0x1d, 0xa7, 0xd1, 0xdf, 0xb4, 0x0a, 0xfc, 0x88, 0xe7, + 0xf7, 0xd4, 0x34, 0xf6, 0xa1, 0xfe, 0xb6, 0x02, 0xb0, 0x66, 0xb8, 0xc6, 0x03, 0x6b, 0x60, 0x05, + 0x07, 0xe8, 0x22, 0x34, 0x0d, 0xd3, 0xd4, 0xfb, 0x02, 0x62, 0x61, 0x91, 0x70, 0xb5, 0x60, 0x98, + 0xe6, 0x5a, 0x0c, 0x8c, 0x9e, 0x87, 0x45, 0xe2, 0x50, 0x65, 0x5c, 0x96, 0x81, 0xd5, 0x24, 0x05, + 0x12, 0xf2, 0x0d, 0x58, 0x25, 0x7c, 0x8d, 0xe1, 0x03, 0x0b, 0xdb, 0x81, 0x4c, 0xc3, 0x52, 0xb3, + 0x96, 0x0d, 0xd3, 0x6c, 0xb3, 0xe2, 0x38, 0xa5, 0xfa, 0x37, 0x33, 0x70, 0x5a, 0xee, 0xf1, 0x64, + 0xa6, 0xc6, 0x2d, 0x98, 0x4b, 
0xc8, 0x9b, 0xca, 0x71, 0x88, 0x5a, 0xa8, 0x49, 0xb8, 0x89, 0x5c, + 0x84, 0x52, 0x2a, 0x17, 0x21, 0x33, 0x0b, 0xa4, 0xfc, 0x09, 0x65, 0x81, 0x54, 0x3e, 0x66, 0x16, + 0xc8, 0xf4, 0x51, 0xb3, 0x40, 0xe6, 0x26, 0xce, 0x02, 0x79, 0x96, 0xba, 0x5e, 0x51, 0x23, 0x0d, + 0x07, 0x98, 0x4f, 0x68, 0x84, 0xdc, 0x6d, 0x91, 0x05, 0x98, 0xc8, 0x16, 0x99, 0x3d, 0x4c, 0xb6, + 0x48, 0x35, 0x37, 0x5b, 0xe4, 0x1c, 0xcc, 0xd9, 0x8e, 0x6e, 0xe3, 0xc7, 0x3a, 0xe9, 0x16, 0x7f, + 0xb5, 0xce, 0xfa, 0xc8, 0x76, 0x36, 0xf1, 0xe3, 0x2e, 0x81, 0xa0, 0xf3, 0x30, 0x37, 0x34, 0xfc, + 0x87, 0xd8, 0xa4, 0x69, 0x1b, 0xfe, 0x6a, 0x83, 0xda, 0x53, 0x9d, 0xc1, 0xba, 0x04, 0x84, 0x9e, + 0x81, 0x50, 0x0e, 0x8e, 0x34, 0x4f, 0x91, 0x1a, 0x02, 0xca, 0xd0, 0x62, 0x99, 0x27, 0x0b, 0x47, + 0xcc, 0x3c, 0x69, 0x1e, 0x26, 0xf3, 0xe4, 0x32, 0x34, 0xc5, 0x6f, 0x91, 0x7a, 0xc2, 0x4e, 0x12, + 0x68, 0xd6, 0xc9, 0x82, 0x28, 0x13, 0xe9, 0x25, 0x79, 0x89, 0x2a, 0x50, 0x98, 0xa8, 0xf2, 0x87, + 0x0a, 0x5f, 0xd3, 0x86, 0x03, 0x88, 0x9f, 0x90, 0x4b, 0xc9, 0x0d, 0xca, 0x51, 0x92, 0x1b, 0xd0, + 0x76, 0x6e, 0xfa, 0xc7, 0xc5, 0x7c, 0x4e, 0xe3, 0x12, 0x40, 0xd4, 0xfb, 0xe1, 0x72, 0xf3, 0x93, + 0x48, 0x63, 0x53, 0xff, 0x53, 0x81, 0xd3, 0x9c, 0x5f, 0x4e, 0xae, 0x57, 0x86, 0x95, 0x2b, 0x39, + 0x56, 0xde, 0xf7, 0xb0, 0x89, 0xed, 0xc0, 0x32, 0x06, 0x34, 0x80, 0x11, 0x27, 0xc8, 0x11, 0x98, + 0xc6, 0x50, 0xe7, 0x61, 0x8e, 0xa5, 0x63, 0xf2, 0x95, 0x27, 0xcb, 0xba, 0xac, 0xd3, 0x8c, 0x4c, + 0xbe, 0xb8, 0xdc, 0xca, 0xf2, 0x2c, 0x95, 0xdc, 0x2d, 0x8b, 0xb1, 0x0e, 0x46, 0x75, 0x60, 0x25, + 0xe7, 0x2c, 0x3f, 0xb3, 0x9b, 0x94, 0x74, 0x37, 0x15, 0x2a, 0x29, 0xdd, 0x4d, 0xdf, 0x56, 0xe0, + 0x6c, 0x6a, 0x05, 0xfc, 0xd9, 0x6b, 0x56, 0xfd, 0x53, 0x25, 0xb4, 0x9f, 0xa4, 0xc9, 0xaf, 0xa5, + 0x4d, 0xfe, 0x99, 0xa2, 0x05, 0x7d, 0xa6, 0xd1, 0xbf, 0x97, 0x6b, 0xf4, 0xcf, 0x17, 0x6e, 0x0e, + 0x8c, 0xd3, 0xe7, 0xbf, 0x29, 0x70, 0x22, 0x57, 0x80, 0x44, 0x3c, 0xa8, 0x24, 0xe3, 0x41, 0x1e, + 0x4b, 0x46, 0xd1, 0x3f, 0x8b, 0x25, 0x69, 0x80, 0xcf, 0x83, 0x36, 0x7d, 0x68, 0x3c, 0xb1, 0x86, + 0xa3, 0x21, 0x0f, 0x26, 0x09, 0xbb, 0xfb, 0x0c, 0x72, 0x94, 0x68, 0xf2, 0x2a, 0x2c, 0x31, 0x47, + 0x4f, 0x03, 0x9a, 0x88, 0x82, 0x05, 0x95, 0x8b, 0xac, 0x8c, 0xc4, 0x36, 0x9c, 0x40, 0x6d, 0xc3, + 0x62, 0xd8, 0xac, 0xc2, 0x5c, 0xa6, 0x58, 0x6e, 0x52, 0x49, 0xce, 0x4d, 0xb2, 0x61, 0x66, 0x1d, + 0x3f, 0xb2, 0xfa, 0xf8, 0x13, 0x49, 0x8b, 0x3e, 0x07, 0x75, 0x17, 0x7b, 0x43, 0xcb, 0xf7, 0xc3, + 0x59, 0xbd, 0xa6, 0xc5, 0x41, 0xea, 0x59, 0xa8, 0xad, 0xad, 0x77, 0x78, 0x95, 0x19, 0xa2, 0xaa, + 0xff, 0x35, 0x03, 0x0b, 0x49, 0x1b, 0xbb, 0x99, 0xca, 0x95, 0x3a, 0x9d, 0xb9, 0xcf, 0x96, 0xb1, + 0xc1, 0xfc, 0xbc, 0x58, 0x7a, 0x95, 0xd2, 0x89, 0x04, 0xe1, 0xf2, 0x4a, 0xac, 0xc8, 0x56, 0x61, + 0xb6, 0xef, 0x0c, 0x87, 0x86, 0x6d, 0x8a, 0xe4, 0x76, 0xfe, 0x49, 0x24, 0x35, 0xbc, 0x3d, 0xb6, + 0xb5, 0x5c, 0xd3, 0xe8, 0x6f, 0x62, 0x02, 0xc4, 0x19, 0x5a, 0x36, 0xcd, 0xb6, 0xa2, 0xbd, 0x54, + 0xd3, 0x80, 0x83, 0xd6, 0x2d, 0x0f, 0x5d, 0x80, 0x0a, 0xb6, 0x1f, 0x89, 0x33, 0x27, 0x69, 0x8b, + 0x53, 0xac, 0x89, 0x34, 0x8a, 0x81, 0x2e, 0xc2, 0xcc, 0x90, 0x98, 0x95, 0x38, 0x91, 0x5f, 0x4c, + 0x25, 0x81, 0x6b, 0x1c, 0x01, 0xbd, 0x00, 0xb3, 0x26, 0xd5, 0x9e, 0x58, 0x04, 0x20, 0x29, 0x6f, + 0x8b, 0x16, 0x69, 0x02, 0x05, 0xbd, 0x15, 0xee, 0xaf, 0xd7, 0xd2, 0x07, 0x5f, 0x09, 0x35, 0x67, + 0x6e, 0xad, 0x6f, 0xca, 0x8b, 0x54, 0x48, 0xef, 0xd2, 0x27, 0xb9, 0x14, 0x2f, 0x55, 0x4f, 0x40, + 0x75, 0xe0, 0xec, 0x31, 0xeb, 0xa9, 0xb3, 0x9b, 0x11, 0x03, 0x67, 0x8f, 0x1a, 0xcf, 0x12, 0x4c, + 0xfb, 0x81, 0x69, 0xd9, 0x34, 0x96, 0xaa, 0x6a, 0xec, 
0x83, 0x0c, 0x52, 0xfa, 0x43, 0x77, 0xec, + 0x3e, 0x5e, 0x6d, 0xd0, 0xa2, 0x1a, 0x85, 0x6c, 0xd9, 0x7d, 0xba, 0xa6, 0x0c, 0x82, 0x83, 0xd5, + 0x79, 0x0a, 0x27, 0x3f, 0xa3, 0x6d, 0xee, 0x85, 0x9c, 0x6d, 0xee, 0x84, 0xc0, 0x19, 0xdb, 0xdc, + 0xcd, 0xdc, 0x39, 0x23, 0x49, 0x2b, 0x48, 0x48, 0x1c, 0xb9, 0xb6, 0xde, 0xd1, 0x45, 0xd7, 0x2c, + 0xa6, 0x73, 0xca, 0x43, 0xb3, 0xd7, 0x20, 0xfc, 0xf9, 0x99, 0x9e, 0x32, 0x7c, 0x4f, 0x81, 0xe5, + 0x35, 0x7a, 0xc6, 0x1a, 0xf3, 0x8d, 0x87, 0x49, 0x4f, 0x7a, 0x29, 0xcc, 0x19, 0xcb, 0x48, 0xfc, + 0x49, 0x6a, 0x4a, 0xa4, 0x8c, 0xad, 0xc1, 0xbc, 0x60, 0xcb, 0x89, 0xcb, 0x13, 0x24, 0x9c, 0x35, + 0xfc, 0xf8, 0xa7, 0xfa, 0x3a, 0xac, 0xa4, 0x24, 0xe7, 0x27, 0x5d, 0xc9, 0xcb, 0x07, 0x4c, 0xf0, + 0xf8, 0xe5, 0x03, 0xf5, 0x16, 0x1c, 0xef, 0x05, 0x86, 0x17, 0xa4, 0x9a, 0x3d, 0x01, 0x2d, 0x4d, + 0x25, 0x93, 0x69, 0x79, 0xb6, 0x57, 0x0f, 0x96, 0x7a, 0x81, 0xe3, 0x1e, 0x81, 0x29, 0xf1, 0x3b, + 0xa4, 0xe5, 0xce, 0x48, 0xcc, 0x33, 0xe2, 0x53, 0x5d, 0x61, 0x89, 0x6f, 0xe9, 0xda, 0xbe, 0x00, + 0xcb, 0x2c, 0xef, 0xec, 0x28, 0x8d, 0x38, 0x21, 0xb2, 0xde, 0xd2, 0x7c, 0xef, 0xc2, 0x31, 0x69, + 0xef, 0x9d, 0xe7, 0x69, 0x5c, 0x93, 0xf3, 0x34, 0xf2, 0x8f, 0x39, 0xc2, 0x34, 0x8d, 0xef, 0x94, + 0x62, 0x7e, 0x3c, 0xe7, 0xb0, 0xf6, 0x15, 0x39, 0x4b, 0xe3, 0x6c, 0x3e, 0x57, 0x29, 0x49, 0x23, + 0x6d, 0x9d, 0xe5, 0x0c, 0xeb, 0xdc, 0x49, 0x9d, 0x04, 0x57, 0xd2, 0x59, 0x36, 0x09, 0x09, 0x3f, + 0x95, 0x33, 0xe0, 0x7b, 0x2c, 0x93, 0x23, 0xac, 0x3a, 0x3c, 0xfe, 0x7d, 0x29, 0x71, 0xfc, 0x7b, + 0xb2, 0x40, 0xd2, 0xf0, 0xe0, 0xf7, 0x3b, 0x15, 0xa8, 0x85, 0x65, 0x29, 0x0d, 0xa7, 0x55, 0x55, + 0xca, 0x50, 0x55, 0x7c, 0x7e, 0x2d, 0x1f, 0x71, 0x7e, 0xad, 0x4c, 0x30, 0xbf, 0x9e, 0x84, 0x1a, + 0xfd, 0x41, 0x93, 0xef, 0xd9, 0x7c, 0x59, 0xa5, 0x00, 0x0d, 0xef, 0x46, 0x26, 0x36, 0x33, 0xa1, + 0x89, 0x25, 0xb2, 0x46, 0x66, 0x93, 0x59, 0x23, 0x37, 0xc3, 0xb9, 0xaf, 0x9a, 0x3e, 0xa5, 0x09, + 0x39, 0x66, 0xce, 0x7a, 0x89, 0xad, 0xd9, 0x5a, 0x7a, 0x6b, 0x36, 0xa2, 0xff, 0xdc, 0x9e, 0x22, + 0x6f, 0xb1, 0x54, 0x90, 0xb8, 0x9d, 0x71, 0x1f, 0xf9, 0x8a, 0x74, 0x0a, 0xa7, 0x64, 0xcc, 0x55, + 0xa1, 0x5f, 0x88, 0x9f, 0xbc, 0xed, 0xc0, 0x72, 0x32, 0x85, 0xec, 0x50, 0x3e, 0x2e, 0x27, 0x97, + 0xf5, 0x37, 0xe2, 0x11, 0x5f, 0x4e, 0xe2, 0xe6, 0xcd, 0x54, 0x8e, 0xc1, 0xc4, 0x16, 0x7a, 0x4d, + 0x4e, 0x47, 0x3a, 0xb4, 0x5d, 0xa5, 0xb2, 0x91, 0x68, 0x44, 0x62, 0x78, 0xbc, 0x98, 0x05, 0xe7, + 0x35, 0x0e, 0x69, 0xd3, 0x95, 0xc1, 0xae, 0x65, 0x5b, 0xfe, 0x3e, 0x2b, 0x9f, 0x61, 0x2b, 0x03, + 0x01, 0x6a, 0xd3, 0x5d, 0x4b, 0xfc, 0xc4, 0x0a, 0xf4, 0xbe, 0x63, 0x62, 0x6a, 0xb5, 0xd3, 0x5a, + 0x95, 0x00, 0xd6, 0x1c, 0x13, 0x47, 0xe3, 0xa9, 0x7a, 0xd8, 0xf1, 0x54, 0x4b, 0x8c, 0xa7, 0x65, + 0x98, 0xf1, 0xb0, 0xe1, 0x3b, 0x36, 0xdb, 0xcc, 0xd0, 0xf8, 0x17, 0xe9, 0x88, 0x21, 0xf6, 0x7d, + 0x52, 0x07, 0x0f, 0xc0, 0xf8, 0x67, 0x2c, 0x58, 0x9c, 0x2b, 0x08, 0x16, 0x0b, 0xd2, 0x42, 0x13, + 0xc1, 0x62, 0xa3, 0x20, 0x58, 0x9c, 0x28, 0x2b, 0x34, 0x0a, 0x8b, 0xe7, 0xc7, 0x85, 0xc5, 0xf1, + 0xb8, 0x72, 0x41, 0x8e, 0x2b, 0x5f, 0x8f, 0xaf, 0x50, 0x9b, 0xe9, 0x43, 0xf2, 0xe2, 0xcb, 0x26, + 0x9f, 0xe1, 0x00, 0xfe, 0x47, 0x05, 0x56, 0x52, 0x03, 0x8e, 0x0f, 0xe1, 0x97, 0x12, 0xf9, 0xa6, + 0x85, 0x89, 0x9e, 0x22, 0xdd, 0xb4, 0x2d, 0xa5, 0x9b, 0x5e, 0x2e, 0x22, 0xc9, 0xc9, 0x36, 0x3d, + 0x7a, 0x06, 0xe8, 0xb7, 0x14, 0x40, 0x19, 0x6b, 0xf0, 0x9b, 0x22, 0x5a, 0x3f, 0xc4, 0x6e, 0x19, + 0x0f, 0xd8, 0xdf, 0x8a, 0x02, 0xf6, 0xd2, 0x61, 0xf6, 0x1d, 0xc2, 0xd4, 0x94, 0x1f, 0x97, 0xe0, + 0xec, 0x8e, 0x6b, 0x26, 0xc2, 0x48, 0x8e, 0x35, 0xb9, 0x67, 0xbb, 0x29, 0xe7, 
0xd5, 0x1c, 0xb1, + 0x09, 0xe5, 0xa3, 0x34, 0x01, 0x7d, 0x2d, 0x2b, 0xf3, 0xe9, 0x75, 0xe9, 0x8c, 0xb2, 0xb8, 0x81, + 0x63, 0xa6, 0xaf, 0x8f, 0x6b, 0xc2, 0x2a, 0x9c, 0xcb, 0x17, 0x80, 0x87, 0x9c, 0xff, 0x1f, 0x16, + 0x36, 0x9e, 0xe0, 0x7e, 0xef, 0xc0, 0xee, 0x1f, 0x42, 0xeb, 0x4d, 0x28, 0xf7, 0x87, 0x26, 0x3f, + 0x1d, 0x21, 0x3f, 0xe3, 0x51, 0x74, 0x59, 0x8e, 0xa2, 0x75, 0x68, 0x46, 0x35, 0xf0, 0x01, 0xb4, + 0x4c, 0x06, 0x90, 0x49, 0x90, 0x09, 0xf3, 0x39, 0x8d, 0x7f, 0x71, 0x38, 0xf6, 0xd8, 0x4d, 0x16, + 0x06, 0xc7, 0x9e, 0x27, 0x7b, 0xed, 0xb2, 0xec, 0xb5, 0xd5, 0xef, 0x2a, 0x50, 0x27, 0x35, 0x7c, + 0x2c, 0xf9, 0xf9, 0x52, 0xb6, 0x1c, 0x2d, 0x65, 0xc3, 0x15, 0x71, 0x25, 0xbe, 0x22, 0x8e, 0x24, + 0x9f, 0xa6, 0xe0, 0xb4, 0xe4, 0x33, 0x21, 0x1c, 0x7b, 0x9e, 0x7a, 0x0e, 0xe6, 0x98, 0x6c, 0xbc, + 0xe5, 0x4d, 0x28, 0x8f, 0xbc, 0x81, 0xe8, 0xbf, 0x91, 0x37, 0x50, 0xbf, 0xa9, 0x40, 0xa3, 0x1d, + 0x04, 0x46, 0x7f, 0xff, 0x10, 0x0d, 0x08, 0x85, 0x2b, 0xc5, 0x85, 0x4b, 0x37, 0x22, 0x12, 0xb7, + 0x92, 0x23, 0xee, 0xb4, 0x24, 0xae, 0x0a, 0xf3, 0x42, 0x96, 0x5c, 0x81, 0x37, 0x01, 0x75, 0x1d, + 0x2f, 0x78, 0xdb, 0xf1, 0x1e, 0x1b, 0x9e, 0x79, 0xb8, 0x55, 0x2b, 0x82, 0x0a, 0x7f, 0x5b, 0xa0, + 0x7c, 0x61, 0x5a, 0xa3, 0xbf, 0xd5, 0xe7, 0xe0, 0x98, 0xc4, 0x2f, 0xb7, 0xe2, 0x5b, 0x50, 0xa7, + 0xb3, 0x30, 0x5f, 0xd0, 0x3c, 0x1f, 0x3f, 0xd8, 0x1f, 0x33, 0x5b, 0xab, 0xeb, 0xb0, 0x48, 0xe2, + 0x31, 0x0a, 0x0f, 0xfd, 0xcb, 0xd5, 0x44, 0xcc, 0xbf, 0x92, 0x62, 0x91, 0x88, 0xf7, 0x7f, 0xaa, + 0xc0, 0x34, 0x3b, 0xc3, 0x4f, 0xc6, 0x48, 0x27, 0xc9, 0x3c, 0xe7, 0x3a, 0x7a, 0x60, 0xec, 0x85, + 0xef, 0x36, 0x10, 0xc0, 0xb6, 0xb1, 0x47, 0x4f, 0x74, 0x68, 0xa1, 0x69, 0xed, 0x61, 0x3f, 0x10, + 0x27, 0x84, 0x75, 0x02, 0x5b, 0x67, 0x20, 0xa2, 0x18, 0x7a, 0x90, 0x5a, 0xa1, 0xe7, 0xa5, 0xf4, + 0x37, 0xba, 0xc0, 0x2e, 0x41, 0x16, 0x1f, 0x8b, 0xd1, 0xcb, 0x91, 0x2d, 0xa8, 0x26, 0xce, 0xb3, + 0xc2, 0x6f, 0x74, 0x11, 0x2a, 0x74, 0xff, 0x79, 0xb6, 0x48, 0x4b, 0x14, 0x85, 0x58, 0x85, 0x6b, + 0xd9, 0x36, 0x36, 0x69, 0x00, 0x54, 0xd5, 0xf8, 0x97, 0xfa, 0x16, 0xa0, 0xb8, 0xf2, 0x78, 0x07, + 0x5d, 0x84, 0x19, 0xaa, 0x5b, 0x11, 0xc4, 0x2e, 0xa6, 0x58, 0x6b, 0x1c, 0x41, 0xfd, 0x2a, 0x20, + 0x56, 0x97, 0x14, 0xb8, 0x1e, 0xa6, 0x03, 0x0b, 0x42, 0xd8, 0x3f, 0x53, 0xe0, 0x98, 0xc4, 0x9d, + 0xcb, 0xf7, 0x9c, 0xcc, 0x3e, 0x43, 0x3c, 0xce, 0xfa, 0x0d, 0x69, 0x66, 0xbe, 0x98, 0x16, 0xe3, + 0xe7, 0x34, 0x2b, 0xff, 0x93, 0x02, 0xd0, 0x1e, 0x05, 0xfb, 0x7c, 0xa3, 0x35, 0xde, 0x89, 0x4a, + 0xa2, 0x13, 0x5b, 0x50, 0x75, 0x0d, 0xdf, 0x7f, 0xec, 0x78, 0x62, 0x11, 0x19, 0x7e, 0xd3, 0xed, + 0xd1, 0x11, 0x7f, 0xcc, 0xa1, 0xa6, 0xd1, 0xdf, 0xe8, 0x19, 0x98, 0x67, 0x0f, 0x8a, 0xe8, 0x86, + 0x69, 0x7a, 0x22, 0x59, 0xb0, 0xa6, 0x35, 0x18, 0xb4, 0xcd, 0x80, 0x04, 0xcd, 0xa2, 0xa7, 0x11, + 0xc1, 0x81, 0x1e, 0x38, 0x0f, 0xb1, 0xcd, 0x17, 0x86, 0x0d, 0x01, 0xdd, 0x26, 0x40, 0x76, 0xdc, + 0xb8, 0x67, 0xf9, 0x81, 0x27, 0xd0, 0xc4, 0xa1, 0x29, 0x87, 0x52, 0x34, 0xf5, 0x8f, 0x14, 0x68, + 0x76, 0x47, 0x83, 0x01, 0x53, 0xee, 0x51, 0x3a, 0xf9, 0x12, 0x6f, 0x4a, 0x29, 0x6d, 0xf2, 0x91, + 0xa2, 0x78, 0x13, 0x3f, 0x91, 0xbd, 0xac, 0x6b, 0xb0, 0x18, 0x93, 0x98, 0x1b, 0x8e, 0x14, 0xd9, + 0x2b, 0x72, 0x64, 0xaf, 0xb6, 0x01, 0xb1, 0xed, 0x9b, 0x23, 0xb7, 0x52, 0x3d, 0x0e, 0xc7, 0x24, + 0x16, 0x7c, 0x2a, 0xbe, 0x04, 0x0d, 0x9e, 0xb8, 0xc6, 0x0d, 0xe2, 0x04, 0x54, 0x89, 0x4b, 0xed, + 0x5b, 0xa6, 0xc8, 0x90, 0x98, 0x75, 0x1d, 0x73, 0xcd, 0x32, 0x3d, 0xf5, 0x4b, 0xd0, 0xe0, 0x37, + 0xe3, 0x39, 0xee, 0x6d, 0x98, 0xe7, 0xe7, 0x83, 0xba, 0x74, 0x95, 0xf4, 0x44, 0x46, 0x76, 0xa4, + 0x50, 
0x85, 0x1d, 0xff, 0x54, 0xbf, 0x06, 0x2d, 0x16, 0x2d, 0x48, 0x8c, 0x45, 0x03, 0x6f, 0x83, + 0x48, 0x4e, 0x2a, 0xe0, 0x2f, 0x53, 0x36, 0xbc, 0xf8, 0xa7, 0x7a, 0x1a, 0x4e, 0x66, 0xf2, 0xe7, + 0xad, 0x77, 0xa1, 0x19, 0x15, 0xb0, 0xfb, 0x8e, 0x61, 0xda, 0x87, 0x12, 0x4b, 0xfb, 0x58, 0x0e, + 0x63, 0xef, 0x92, 0x98, 0xb9, 0x68, 0x78, 0x1d, 0xad, 0xb8, 0xca, 0x79, 0x2b, 0xae, 0x8a, 0xb4, + 0xe2, 0x52, 0xef, 0x87, 0x3a, 0xe4, 0xeb, 0xde, 0xd7, 0xe9, 0xca, 0x9c, 0xd5, 0x2d, 0x9c, 0xda, + 0xa9, 0xec, 0xf6, 0x31, 0x24, 0x2d, 0x86, 0xaf, 0x5e, 0x84, 0x86, 0xec, 0xde, 0x62, 0x1e, 0x4b, + 0x49, 0x79, 0xac, 0xf9, 0x84, 0xb3, 0x7a, 0x31, 0xb1, 0xa4, 0xc8, 0xd2, 0x6b, 0x62, 0x41, 0x71, + 0x43, 0x72, 0x5b, 0x4f, 0x4b, 0x47, 0xf4, 0x3f, 0x27, 0x8f, 0xb5, 0xc4, 0xfd, 0xf8, 0xdb, 0x3e, + 0xa1, 0xe7, 0x0d, 0x55, 0x9f, 0x82, 0xfa, 0x4e, 0xde, 0xfb, 0x24, 0x15, 0x91, 0x57, 0xf6, 0x2a, + 0x2c, 0xbd, 0x6d, 0x0d, 0xb0, 0x7f, 0xe0, 0x07, 0x78, 0xd8, 0xa1, 0xee, 0x65, 0xd7, 0xc2, 0x1e, + 0x3a, 0x03, 0x40, 0x57, 0x91, 0xae, 0x63, 0x85, 0x6f, 0x32, 0xc4, 0x20, 0xea, 0x8f, 0x14, 0x58, + 0x88, 0x08, 0x27, 0x49, 0x1e, 0x7c, 0x05, 0xa6, 0x77, 0x7d, 0xb1, 0xdb, 0x96, 0x38, 0x83, 0xc8, + 0x12, 0x41, 0xab, 0xec, 0xfa, 0x1d, 0x13, 0xbd, 0x0a, 0x30, 0xf2, 0xb1, 0xc9, 0x8f, 0xfd, 0xc6, + 0xa4, 0x73, 0xd6, 0x08, 0x2a, 0x3b, 0x38, 0xbc, 0x01, 0x75, 0xcb, 0x76, 0x4c, 0x4c, 0x8f, 0x84, + 0xcd, 0x71, 0xa9, 0x9c, 0xc0, 0x70, 0x77, 0x7c, 0x6c, 0xaa, 0xbf, 0x17, 0x1d, 0xec, 0x7e, 0x9e, + 0x5b, 0xa8, 0xfe, 0xb1, 0x98, 0x60, 0x45, 0xb7, 0x73, 0x9b, 0x7d, 0x07, 0x16, 0x99, 0x9f, 0xdc, + 0x0d, 0xeb, 0xcc, 0xbc, 0xe3, 0x92, 0x68, 0x9c, 0xd6, 0xb4, 0x78, 0x68, 0x25, 0x88, 0x50, 0x17, + 0x8e, 0x47, 0x11, 0x6f, 0x9c, 0x5b, 0x69, 0x3c, 0xb7, 0xa5, 0x7e, 0x6c, 0x73, 0x56, 0x10, 0xaa, + 0xb7, 0xe0, 0x78, 0x22, 0x8d, 0x7d, 0xf2, 0x1d, 0xfa, 0x77, 0x13, 0x5b, 0x6d, 0xd1, 0x28, 0xbd, + 0x26, 0xdf, 0x9e, 0x2a, 0xba, 0x70, 0xc0, 0x2f, 0xf2, 0xec, 0xc0, 0x09, 0x69, 0x1f, 0x50, 0x92, + 0xe5, 0x46, 0x22, 0xfe, 0x3c, 0x97, 0xcf, 0x2f, 0x11, 0x88, 0xfe, 0xb7, 0x02, 0x4b, 0x59, 0x08, + 0x47, 0xdc, 0x83, 0xfe, 0x20, 0xe7, 0xe6, 0xe5, 0x4b, 0xe3, 0x04, 0xfa, 0x54, 0xf6, 0xec, 0x37, + 0xd9, 0xbd, 0xad, 0xf1, 0x7d, 0x52, 0x9e, 0xac, 0x4f, 0x7e, 0x5a, 0x8a, 0x9d, 0xb3, 0x14, 0xdc, + 0xad, 0xfa, 0x18, 0xfb, 0x9e, 0x6b, 0x89, 0xab, 0x55, 0xcf, 0x67, 0x12, 0x8e, 0xb9, 0x59, 0xa5, + 0x65, 0xed, 0x2f, 0x5c, 0x1b, 0xc7, 0xe9, 0x73, 0xbb, 0x25, 0xfe, 0x9b, 0x25, 0x98, 0x97, 0x3b, + 0x04, 0xbd, 0x95, 0x71, 0xaf, 0xea, 0xec, 0x98, 0x06, 0x4a, 0xd7, 0xaa, 0xf8, 0x3d, 0xa6, 0xd2, + 0xe4, 0xf7, 0x98, 0xca, 0x93, 0xdd, 0x63, 0xba, 0x03, 0xf3, 0x8f, 0x3d, 0x2b, 0x30, 0x1e, 0x0c, + 0xb0, 0x3e, 0x30, 0x0e, 0xb0, 0xc7, 0x1d, 0x7b, 0xa1, 0x2b, 0x6a, 0x08, 0x92, 0x7b, 0x84, 0x82, + 0xae, 0xbc, 0x1e, 0x1b, 0x2e, 0x5f, 0xc0, 0x49, 0x31, 0x61, 0xef, 0xb1, 0xe1, 0x32, 0x1a, 0x8a, + 0xa2, 0x7e, 0xb3, 0x04, 0xc7, 0x33, 0x6f, 0xdf, 0x7c, 0x7c, 0x15, 0x5d, 0x8e, 0xab, 0xe8, 0x30, + 0x57, 0x9a, 0xca, 0x87, 0xba, 0xd2, 0xd4, 0xc9, 0x51, 0x58, 0xd6, 0x41, 0x7e, 0xb1, 0xde, 0xd4, + 0xbf, 0x54, 0xa0, 0x2a, 0x84, 0x1a, 0x7b, 0xc1, 0x68, 0x65, 0x44, 0xd0, 0x74, 0x9a, 0x04, 0x6e, + 0x1b, 0xb6, 0xa3, 0xfb, 0x98, 0x04, 0x65, 0x63, 0xaf, 0x73, 0x2c, 0x51, 0xba, 0x35, 0xc7, 0xc3, + 0x9b, 0x86, 0xed, 0xf4, 0x18, 0x11, 0x6a, 0x43, 0x93, 0xf1, 0xa3, 0xac, 0x08, 0xd3, 0xb1, 0x13, + 0xe5, 0x3c, 0x25, 0x20, 0x4c, 0x08, 0x33, 0x5f, 0xfd, 0xbe, 0x02, 0x0b, 0x09, 0xcd, 0xfe, 0xe2, + 0x35, 0xe2, 0x77, 0xcb, 0x50, 0x8f, 0xf5, 0xf2, 0x98, 0x06, 0xac, 0xc1, 0xa2, 0x48, 0xc6, 0xf1, + 0x71, 0x30, 0xd9, 0x75, 0x9a, 
0x05, 0x4e, 0xd1, 0xc3, 0x01, 0x8b, 0xa3, 0x6e, 0xc3, 0x82, 0xf1, + 0xc8, 0xb0, 0x06, 0xd4, 0x82, 0x26, 0x0a, 0x51, 0xe6, 0x43, 0xfc, 0x30, 0x12, 0x63, 0xed, 0x9e, + 0xe8, 0x52, 0x0d, 0x50, 0xdc, 0xe8, 0x6e, 0x93, 0xef, 0xc7, 0x32, 0xbe, 0x0a, 0xef, 0x36, 0xf9, + 0x7e, 0x58, 0x1f, 0xcd, 0x80, 0xa7, 0x97, 0xba, 0x7c, 0xfe, 0x12, 0x48, 0x7e, 0x7d, 0x04, 0xf7, + 0x6d, 0x8a, 0x4a, 0x14, 0x36, 0x34, 0x3e, 0x74, 0x3c, 0x3d, 0x4e, 0x3f, 0x3b, 0x46, 0x61, 0x94, + 0xa2, 0x1b, 0x32, 0x51, 0xff, 0x5c, 0x81, 0x5a, 0xe8, 0x47, 0xc6, 0xf4, 0x50, 0x07, 0x96, 0xe8, + 0x75, 0x81, 0xa4, 0x86, 0xc7, 0x74, 0x12, 0x22, 0x44, 0x6d, 0x59, 0xcb, 0x6d, 0x68, 0x52, 0x56, + 0x71, 0x55, 0x8f, 0xeb, 0x28, 0x5f, 0x88, 0xc9, 0x02, 0xca, 0xbf, 0x2a, 0x01, 0x4a, 0xbb, 0x92, + 0x5f, 0x18, 0x23, 0x8b, 0x77, 0x5a, 0x65, 0xf2, 0x4e, 0xbf, 0x0b, 0xc7, 0xfa, 0xce, 0x70, 0x68, + 0xd1, 0xab, 0x26, 0x8e, 0x77, 0x30, 0x99, 0xb9, 0x2d, 0x32, 0x1a, 0xa6, 0x27, 0xa6, 0xbe, 0x37, + 0xe1, 0x84, 0x86, 0x1d, 0x17, 0xdb, 0xa1, 0xeb, 0xbf, 0xe7, 0xec, 0x1d, 0x22, 0xbe, 0x3d, 0x05, + 0xad, 0x2c, 0x7a, 0xbe, 0x10, 0x1f, 0x41, 0x6b, 0x6d, 0x1f, 0xf7, 0x1f, 0xd2, 0xe5, 0xd7, 0x51, + 0x12, 0x6a, 0x5a, 0x50, 0x1d, 0x38, 0x7d, 0xf6, 0xac, 0x2a, 0xdf, 0xab, 0x12, 0xdf, 0x05, 0xc7, + 0x04, 0xa7, 0xe1, 0x64, 0x66, 0xb5, 0x5c, 0x2a, 0x04, 0xcd, 0xbb, 0x38, 0xd8, 0x78, 0x84, 0xed, + 0x30, 0x7c, 0x56, 0x7f, 0x50, 0x8a, 0x05, 0xea, 0xb4, 0xe8, 0x10, 0x89, 0x48, 0xa8, 0x0b, 0xd1, + 0xca, 0x41, 0xc7, 0x84, 0x9a, 0x3d, 0x72, 0xc8, 0x9e, 0x07, 0xcd, 0x3e, 0xa4, 0xa4, 0x95, 0xd0, + 0xb7, 0x0d, 0xa3, 0xe7, 0x5b, 0x42, 0x58, 0xe2, 0xe8, 0xba, 0x9c, 0x3c, 0xba, 0x7e, 0x17, 0x50, + 0x3c, 0x14, 0xe7, 0xcb, 0xfd, 0xca, 0x04, 0x2f, 0xd6, 0x34, 0xdd, 0xe4, 0xdb, 0x4a, 0x39, 0xef, + 0xce, 0x4c, 0x1f, 0xe9, 0xdd, 0x19, 0xf5, 0x0c, 0x9c, 0x22, 0x01, 0xf6, 0x7d, 0x1c, 0x78, 0x56, + 0x7f, 0x1d, 0xfb, 0x7d, 0xcf, 0x72, 0x03, 0x27, 0xcc, 0x8d, 0x51, 0x75, 0x38, 0x9d, 0x53, 0xce, + 0xd5, 0xfd, 0x26, 0xd4, 0xcd, 0x08, 0x9c, 0xb5, 0x75, 0x92, 0xa4, 0xd5, 0xe2, 0x04, 0xea, 0xfb, + 0xd0, 0x4c, 0x22, 0x64, 0xa6, 0xd2, 0x22, 0xa8, 0xec, 0xe3, 0x81, 0x2b, 0xee, 0x06, 0x91, 0xdf, + 0x44, 0xeb, 0x6c, 0xed, 0xf2, 0x10, 0x1f, 0x88, 0xad, 0xf5, 0x1a, 0x85, 0x7c, 0x11, 0x1f, 0x84, + 0x6d, 0x93, 0x1e, 0x42, 0xf0, 0xac, 0x7e, 0xb2, 0x6d, 0x19, 0xe5, 0x51, 0xdb, 0x48, 0xb7, 0x0d, + 0x19, 0x98, 0xb7, 0xed, 0x74, 0xee, 0x23, 0x0b, 0x94, 0x16, 0x5c, 0xc7, 0xe4, 0xbf, 0xd5, 0x3f, + 0x51, 0x60, 0x31, 0x85, 0x31, 0xe1, 0x71, 0xc9, 0x0b, 0x30, 0x2b, 0xea, 0x2d, 0xa5, 0xf3, 0x4d, + 0x19, 0x2f, 0x4d, 0xa0, 0xa0, 0x0e, 0x2c, 0x46, 0x16, 0x2d, 0xe8, 0xca, 0xe9, 0xbe, 0x88, 0x2f, + 0x5c, 0xa8, 0xb8, 0xcd, 0x7e, 0x02, 0xa2, 0xf6, 0xa1, 0x99, 0xc4, 0x9a, 0x64, 0x4c, 0x1d, 0x4a, + 0x5e, 0xf5, 0xef, 0x15, 0x98, 0x61, 0xb0, 0xcc, 0xce, 0x96, 0xa6, 0x83, 0x52, 0x72, 0x3a, 0x78, + 0x0d, 0xea, 0x8c, 0x8f, 0x1e, 0xde, 0x0c, 0x9b, 0x97, 0x77, 0x8c, 0x19, 0x6b, 0x3a, 0x5a, 0x61, + 0x18, 0xfe, 0x26, 0xcd, 0x60, 0xf6, 0x42, 0x57, 0x26, 0x22, 0xab, 0xb8, 0x4e, 0x61, 0xd4, 0xe5, + 0x92, 0x90, 0x99, 0xaf, 0x61, 0xc6, 0xf8, 0x66, 0xbe, 0xb5, 0xb5, 0x4c, 0x9f, 0xf5, 0x4b, 0xed, + 0x99, 0xaa, 0xdb, 0xf4, 0xdd, 0xbd, 0xf4, 0x5e, 0x27, 0xfa, 0x82, 0x7c, 0xee, 0xfe, 0x4c, 0xea, + 0xd0, 0x5a, 0x22, 0x1b, 0x79, 0xec, 0xf9, 0x69, 0x46, 0xa3, 0x7e, 0x00, 0x27, 0x72, 0x71, 0xd0, + 0x1b, 0xe1, 0x23, 0xa7, 0xa6, 0x67, 0x3d, 0xe2, 0x1b, 0x0b, 0xf3, 0xf2, 0x83, 0x0a, 0x6b, 0x14, + 0x61, 0x9d, 0x96, 0x8b, 0xe7, 0x4f, 0xd9, 0xd7, 0xa5, 0x67, 0xa1, 0x2a, 0x9e, 0x06, 0x47, 0xb3, + 0x50, 0xde, 0x5e, 0xeb, 0x36, 0xa7, 0xc8, 0x8f, 0x9d, 
0xf5, 0x6e, 0x53, 0x41, 0x55, 0xa8, 0xf4, + 0xd6, 0xb6, 0xbb, 0xcd, 0xd2, 0xa5, 0x21, 0x34, 0x93, 0xaf, 0x63, 0xa3, 0x15, 0x38, 0xd6, 0xd5, + 0xb6, 0xba, 0xed, 0xbb, 0xed, 0xed, 0xce, 0xd6, 0xa6, 0xde, 0xd5, 0x3a, 0xef, 0xb5, 0xb7, 0x37, + 0x9a, 0x53, 0xe8, 0x3c, 0x9c, 0x8e, 0x17, 0xbc, 0xb3, 0xd5, 0xdb, 0xd6, 0xb7, 0xb7, 0xf4, 0xb5, + 0xad, 0xcd, 0xed, 0x76, 0x67, 0x73, 0x43, 0x6b, 0x2a, 0xe8, 0x34, 0x9c, 0x88, 0xa3, 0xdc, 0xe9, + 0xac, 0x77, 0xb4, 0x8d, 0x35, 0xf2, 0xbb, 0x7d, 0xaf, 0x59, 0xba, 0xf4, 0x06, 0x34, 0xa4, 0xbb, + 0x30, 0x44, 0xa4, 0xee, 0xd6, 0x7a, 0x73, 0x0a, 0x35, 0xa0, 0x16, 0xe7, 0x53, 0x85, 0xca, 0xe6, + 0xd6, 0xfa, 0x46, 0xb3, 0x84, 0x00, 0x66, 0xb6, 0xdb, 0xda, 0xdd, 0x8d, 0xed, 0x66, 0xf9, 0xd2, + 0xad, 0xe4, 0x83, 0x1e, 0x18, 0x2d, 0x42, 0xa3, 0xd7, 0xde, 0x5c, 0xbf, 0xb3, 0xf5, 0x15, 0x5d, + 0xdb, 0x68, 0xaf, 0xbf, 0xdf, 0x9c, 0x42, 0x4b, 0xd0, 0x14, 0xa0, 0xcd, 0xad, 0x6d, 0x06, 0x55, + 0x2e, 0x3d, 0x4c, 0xac, 0x59, 0x31, 0x3a, 0x0e, 0x8b, 0x61, 0x95, 0xfa, 0x9a, 0xb6, 0xd1, 0xde, + 0xde, 0x20, 0x92, 0x48, 0x60, 0x6d, 0x67, 0x73, 0xb3, 0xb3, 0x79, 0xb7, 0xa9, 0x10, 0xae, 0x11, + 0x78, 0xe3, 0x2b, 0x1d, 0x82, 0x5c, 0x92, 0x91, 0x77, 0x36, 0xbf, 0xb8, 0xb9, 0xf5, 0xe5, 0xcd, + 0x66, 0xf9, 0xd2, 0xaf, 0xc4, 0xd3, 0x34, 0xa2, 0x79, 0xe5, 0x24, 0xac, 0xa4, 0x6a, 0xd4, 0x37, + 0xde, 0xdb, 0xd8, 0xdc, 0x6e, 0x4e, 0xc9, 0x85, 0xbd, 0xed, 0xb6, 0x16, 0x15, 0x2a, 0xc9, 0xc2, + 0xad, 0x6e, 0x37, 0x2c, 0x2c, 0xc9, 0x85, 0xeb, 0x1b, 0xf7, 0x36, 0x22, 0xca, 0xf2, 0xa5, 0xa7, + 0x01, 0xa2, 0xf1, 0x83, 0xea, 0x30, 0xbb, 0xb6, 0xb5, 0xb3, 0xb9, 0xbd, 0xa1, 0x35, 0xa7, 0x50, + 0x0d, 0xa6, 0xef, 0xb6, 0x77, 0xee, 0x6e, 0x34, 0x95, 0x4b, 0x17, 0x61, 0x2e, 0x6e, 0x4d, 0x04, + 0xaf, 0xf7, 0x7e, 0x6f, 0x7b, 0xe3, 0x3e, 0xd1, 0xc8, 0x1c, 0x54, 0xd7, 0xee, 0x6a, 0x5b, 0x3b, + 0xdd, 0xb7, 0x7b, 0x4d, 0xe5, 0xfa, 0xff, 0x2e, 0x85, 0x8f, 0xf9, 0xf6, 0xb0, 0x47, 0x6f, 0x20, + 0xac, 0xc3, 0xac, 0x78, 0x4c, 0x5f, 0xda, 0xb5, 0x91, 0x1f, 0xff, 0x6f, 0x9d, 0xcc, 0x2c, 0xe3, + 0x71, 0xc1, 0x14, 0x7a, 0x8f, 0x6e, 0xe3, 0xc7, 0x9e, 0xd3, 0x3a, 0x97, 0xd8, 0x3a, 0x4f, 0xbd, + 0xda, 0xd5, 0x3a, 0x5f, 0x80, 0x11, 0xf2, 0x7d, 0x1f, 0xe6, 0xe5, 0x77, 0x2b, 0xd1, 0x79, 0x79, + 0x8b, 0x3d, 0xe3, 0x49, 0xcc, 0x96, 0x5a, 0x84, 0x12, 0xb2, 0xd6, 0xa1, 0x99, 0x7c, 0xb7, 0x12, + 0x49, 0x99, 0x2b, 0x39, 0xcf, 0x62, 0xb6, 0x9e, 0x2e, 0x46, 0x8a, 0x57, 0x90, 0x7a, 0x8e, 0xf1, + 0xa9, 0xe2, 0x07, 0xee, 0x32, 0x2a, 0xc8, 0x7b, 0x05, 0x8f, 0x29, 0x47, 0x9e, 0x35, 0x51, 0xe2, + 0x05, 0xc4, 0x8c, 0xc7, 0xd2, 0x64, 0xe5, 0x64, 0x3f, 0x94, 0xa5, 0x4e, 0xa1, 0xff, 0x07, 0x0b, + 0x89, 0xf4, 0x72, 0x24, 0x11, 0x66, 0x67, 0xcd, 0xb7, 0x9e, 0x2a, 0xc4, 0x91, 0x7b, 0x35, 0x9e, + 0x42, 0x9e, 0xec, 0xd5, 0x8c, 0xd4, 0xf4, 0x64, 0xaf, 0x66, 0x66, 0xa0, 0x53, 0x43, 0x94, 0xd2, + 0xc5, 0x65, 0x43, 0xcc, 0x4a, 0x4f, 0x6f, 0x9d, 0x2f, 0xc0, 0x88, 0x2b, 0x24, 0x91, 0x30, 0x2e, + 0x2b, 0x24, 0x3b, 0x15, 0xbd, 0xf5, 0x54, 0x21, 0x4e, 0xb2, 0x27, 0xa3, 0x44, 0xd5, 0x74, 0x4f, + 0xa6, 0x92, 0xa5, 0xd3, 0x3d, 0x99, 0xce, 0x73, 0xe5, 0x3d, 0x99, 0x48, 0x2d, 0x55, 0x0b, 0xd3, + 0xde, 0xb2, 0x7a, 0x32, 0x3b, 0x35, 0x4e, 0x9d, 0x42, 0x8f, 0x61, 0x35, 0x2f, 0xbb, 0x09, 0x3d, + 0x7f, 0x88, 0x24, 0xac, 0xd6, 0x0b, 0x93, 0x21, 0x87, 0x15, 0x63, 0x40, 0xe9, 0xe5, 0x13, 0x7a, + 0x46, 0x56, 0x77, 0xce, 0xf2, 0xac, 0xf5, 0xec, 0x38, 0xb4, 0xb0, 0x9a, 0xbb, 0x50, 0x15, 0x79, + 0x53, 0x48, 0x72, 0x81, 0x89, 0x7c, 0xad, 0xd6, 0xa9, 0xec, 0xc2, 0x90, 0xd1, 0x17, 0xa0, 0x42, + 0xa0, 0x68, 0x25, 0x89, 0x27, 0x18, 0xac, 0xa6, 0x0b, 0x42, 0xe2, 0x36, 0xcc, 
0xb0, 0x84, 0x20, + 0x24, 0x9d, 0x48, 0x4a, 0x09, 0x4b, 0xad, 0x56, 0x56, 0x51, 0xc8, 0xa2, 0xcb, 0xfe, 0x35, 0x09, + 0xcf, 0xef, 0x41, 0x67, 0x92, 0x2f, 0x56, 0xcb, 0x89, 0x44, 0xad, 0xb3, 0xb9, 0xe5, 0x71, 0x9b, + 0x4d, 0xec, 0x92, 0x9e, 0x2f, 0xd8, 0xf5, 0xcf, 0xb2, 0xd9, 0xec, 0xb3, 0x04, 0xd6, 0xb9, 0xe9, + 0xb3, 0x06, 0xf4, 0x4c, 0xae, 0xbd, 0x4b, 0x55, 0x3c, 0x3b, 0x0e, 0x2d, 0x3e, 0x34, 0x92, 0x4f, + 0x4f, 0xa9, 0x45, 0xcf, 0xc2, 0x65, 0x0d, 0x8d, 0x9c, 0xe7, 0xe6, 0xd4, 0x29, 0xb4, 0x0f, 0xc7, + 0x32, 0xde, 0xa3, 0x43, 0xcf, 0xe6, 0xfb, 0x5f, 0xa9, 0x96, 0xe7, 0xc6, 0xe2, 0xc5, 0x6b, 0xca, + 0x38, 0xd4, 0x97, 0x6b, 0xca, 0xcf, 0x2a, 0x90, 0x6b, 0x2a, 0xca, 0x0e, 0xa0, 0x86, 0xc8, 0x7d, + 0xc8, 0x89, 0xac, 0x93, 0xee, 0x0c, 0x43, 0x4c, 0x79, 0x8c, 0x7d, 0x38, 0x96, 0xb1, 0xc5, 0x20, + 0x0b, 0x9b, 0xbf, 0xf5, 0x21, 0x0b, 0x5b, 0xb4, 0x57, 0x31, 0x85, 0x3e, 0x00, 0x74, 0x17, 0x07, + 0x72, 0x28, 0xe7, 0x23, 0x69, 0xa0, 0x26, 0x77, 0x33, 0x72, 0xec, 0x53, 0xda, 0xd6, 0x50, 0xa7, + 0xae, 0x29, 0xc8, 0x66, 0x37, 0x58, 0x52, 0x8b, 0x71, 0x74, 0x21, 0xd9, 0x6d, 0x79, 0xeb, 0xf9, + 0xd6, 0xc5, 0x09, 0x30, 0xc3, 0xb6, 0xd8, 0xc9, 0xb7, 0x4f, 0xc5, 0x7a, 0xf0, 0x42, 0xbe, 0x99, + 0xc8, 0x6b, 0xec, 0x74, 0x7d, 0xb9, 0xab, 0xed, 0x30, 0x9e, 0x8b, 0x19, 0xd3, 0xb9, 0xfc, 0x14, + 0x93, 0x9c, 0x78, 0x2e, 0xcb, 0x80, 0xae, 0xff, 0x4e, 0x19, 0xe6, 0x58, 0x2a, 0x0e, 0x0f, 0x3f, + 0xef, 0x03, 0x44, 0x59, 0x6d, 0xe8, 0x74, 0x52, 0x46, 0x29, 0x55, 0xb0, 0x75, 0x26, 0xaf, 0x38, + 0xee, 0xe6, 0x62, 0xd9, 0x62, 0xb2, 0x9b, 0x4b, 0x27, 0xbf, 0xc9, 0x6e, 0x2e, 0x23, 0xcd, 0x4c, + 0x9d, 0x42, 0xef, 0x42, 0x2d, 0x4c, 0x4e, 0x92, 0x8d, 0x27, 0x99, 0x65, 0xd5, 0x3a, 0x9d, 0x53, + 0x1a, 0x97, 0x2e, 0x96, 0x73, 0x24, 0x4b, 0x97, 0xce, 0x67, 0x92, 0xa5, 0xcb, 0x4a, 0x56, 0x8a, + 0xda, 0xcb, 0x92, 0x02, 0x32, 0xda, 0x2b, 0x25, 0x89, 0x64, 0xb4, 0x57, 0xce, 0x26, 0x50, 0xa7, + 0xee, 0xdc, 0xfe, 0xe1, 0x4f, 0xce, 0x28, 0x3f, 0xfa, 0xc9, 0x99, 0xa9, 0x5f, 0xfa, 0xe8, 0x8c, + 0xf2, 0xc3, 0x8f, 0xce, 0x28, 0xff, 0xfc, 0xd1, 0x19, 0xe5, 0xc7, 0x1f, 0x9d, 0x51, 0xbe, 0xf5, + 0x1f, 0x67, 0xa6, 0x3e, 0x50, 0x1f, 0xde, 0xf0, 0xaf, 0x58, 0xce, 0xd5, 0xbe, 0x67, 0x5d, 0x36, + 0x5c, 0xeb, 0xaa, 0xfb, 0x70, 0xef, 0xaa, 0xe1, 0x5a, 0xfe, 0x55, 0xce, 0xf7, 0xea, 0xa3, 0x17, + 0x1f, 0xcc, 0xd0, 0x7f, 0x67, 0xf5, 0xd2, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x24, 0xa8, 0x67, + 0x4b, 0x88, 0x6c, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// RuntimeServiceClient is the client API for RuntimeService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type RuntimeServiceClient interface { + // Version returns the runtime name, runtime version, and runtime API version. + Version(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*VersionResponse, error) + // RunPodSandbox creates and starts a pod-level sandbox. Runtimes must ensure + // the sandbox is in the ready state on success. 
+	RunPodSandbox(ctx context.Context, in *RunPodSandboxRequest, opts ...grpc.CallOption) (*RunPodSandboxResponse, error)
+	// StopPodSandbox stops any running process that is part of the sandbox and
+	// reclaims network resources (e.g., IP addresses) allocated to the sandbox.
+	// If there are any running containers in the sandbox, they must be forcibly
+	// terminated.
+	// This call is idempotent, and must not return an error if all relevant
+	// resources have already been reclaimed. kubelet will call StopPodSandbox
+	// at least once before calling RemovePodSandbox. It will also attempt to
+	// reclaim resources eagerly, as soon as a sandbox is not needed. Hence,
+	// multiple StopPodSandbox calls are expected.
+	StopPodSandbox(ctx context.Context, in *StopPodSandboxRequest, opts ...grpc.CallOption) (*StopPodSandboxResponse, error)
+	// RemovePodSandbox removes the sandbox. If there are any running containers
+	// in the sandbox, they must be forcibly terminated and removed.
+	// This call is idempotent, and must not return an error if the sandbox has
+	// already been removed.
+	RemovePodSandbox(ctx context.Context, in *RemovePodSandboxRequest, opts ...grpc.CallOption) (*RemovePodSandboxResponse, error)
+	// PodSandboxStatus returns the status of the PodSandbox. If the PodSandbox is not
+	// present, returns an error.
+	PodSandboxStatus(ctx context.Context, in *PodSandboxStatusRequest, opts ...grpc.CallOption) (*PodSandboxStatusResponse, error)
+	// ListPodSandbox returns a list of PodSandboxes.
+	ListPodSandbox(ctx context.Context, in *ListPodSandboxRequest, opts ...grpc.CallOption) (*ListPodSandboxResponse, error)
+	// CreateContainer creates a new container in the specified PodSandbox.
+	CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error)
+	// StartContainer starts the container.
+	StartContainer(ctx context.Context, in *StartContainerRequest, opts ...grpc.CallOption) (*StartContainerResponse, error)
+	// StopContainer stops a running container with a grace period (i.e., timeout).
+	// This call is idempotent, and must not return an error if the container has
+	// already been stopped.
+	// The runtime must forcibly kill the container after the grace period is
+	// reached.
+	StopContainer(ctx context.Context, in *StopContainerRequest, opts ...grpc.CallOption) (*StopContainerResponse, error)
+	// RemoveContainer removes the container. If the container is running, the
+	// container must be forcibly removed.
+	// This call is idempotent, and must not return an error if the container has
+	// already been removed.
+	RemoveContainer(ctx context.Context, in *RemoveContainerRequest, opts ...grpc.CallOption) (*RemoveContainerResponse, error)
+	// ListContainers lists all containers by filters.
+	ListContainers(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error)
+	// ContainerStatus returns the status of the container. If the container is not
+	// present, returns an error.
+	ContainerStatus(ctx context.Context, in *ContainerStatusRequest, opts ...grpc.CallOption) (*ContainerStatusResponse, error)
+	// UpdateContainerResources updates ContainerConfig of the container synchronously.
+	// If the runtime fails to transactionally update the requested resources, an error is returned.
+	UpdateContainerResources(ctx context.Context, in *UpdateContainerResourcesRequest, opts ...grpc.CallOption) (*UpdateContainerResourcesResponse, error)
+	// ReopenContainerLog asks the runtime to reopen the stdout/stderr log file
+	// for the container. This is often called after the log file has been
+	// rotated. If the container is not running, the container runtime can choose
+	// to either create a new log file and return nil, or return an error.
+	// Once it returns an error, a new container log file MUST NOT be created.
+	ReopenContainerLog(ctx context.Context, in *ReopenContainerLogRequest, opts ...grpc.CallOption) (*ReopenContainerLogResponse, error)
+	// ExecSync runs a command in a container synchronously.
+	ExecSync(ctx context.Context, in *ExecSyncRequest, opts ...grpc.CallOption) (*ExecSyncResponse, error)
+	// Exec prepares a streaming endpoint to execute a command in the container.
+	Exec(ctx context.Context, in *ExecRequest, opts ...grpc.CallOption) (*ExecResponse, error)
+	// Attach prepares a streaming endpoint to attach to a running container.
+	Attach(ctx context.Context, in *AttachRequest, opts ...grpc.CallOption) (*AttachResponse, error)
+	// PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
+	PortForward(ctx context.Context, in *PortForwardRequest, opts ...grpc.CallOption) (*PortForwardResponse, error)
+	// ContainerStats returns stats of the container. If the container does not
+	// exist, the call returns an error.
+	ContainerStats(ctx context.Context, in *ContainerStatsRequest, opts ...grpc.CallOption) (*ContainerStatsResponse, error)
+	// ListContainerStats returns stats of all running containers.
+	ListContainerStats(ctx context.Context, in *ListContainerStatsRequest, opts ...grpc.CallOption) (*ListContainerStatsResponse, error)
+	// PodSandboxStats returns stats of the pod sandbox. If the pod sandbox does not
+	// exist, the call returns an error.
+	PodSandboxStats(ctx context.Context, in *PodSandboxStatsRequest, opts ...grpc.CallOption) (*PodSandboxStatsResponse, error)
+	// ListPodSandboxStats returns stats of the pod sandboxes matching a filter.
+	ListPodSandboxStats(ctx context.Context, in *ListPodSandboxStatsRequest, opts ...grpc.CallOption) (*ListPodSandboxStatsResponse, error)
+	// UpdateRuntimeConfig updates the runtime configuration based on the given request.
+	UpdateRuntimeConfig(ctx context.Context, in *UpdateRuntimeConfigRequest, opts ...grpc.CallOption) (*UpdateRuntimeConfigResponse, error)
+	// Status returns the status of the runtime.
+	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
+	// CheckpointContainer checkpoints a container.
+	CheckpointContainer(ctx context.Context, in *CheckpointContainerRequest, opts ...grpc.CallOption) (*CheckpointContainerResponse, error)
+	// GetContainerEvents gets container events from the CRI runtime.
+	GetContainerEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (RuntimeService_GetContainerEventsClient, error)
+	// ListMetricDescriptors gets the descriptors for the metrics that will be returned in ListPodSandboxMetrics.
+	// This list should be static at startup: either the client and server restart together when
+	// adding or removing metrics descriptors, or they should not change.
+	// Put differently, if ListPodSandboxMetrics references a name that is not described in the initial
+	// ListMetricDescriptors call, then the metric will not be broadcast.
+ ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) + // ListPodSandboxMetrics gets pod sandbox metrics from CRI Runtime + ListPodSandboxMetrics(ctx context.Context, in *ListPodSandboxMetricsRequest, opts ...grpc.CallOption) (*ListPodSandboxMetricsResponse, error) + // RuntimeConfig returns configuration information of the runtime. + // A couple of notes: + // - The RuntimeConfigRequest object is not to be confused with the contents of UpdateRuntimeConfigRequest. + // The former is for having runtime tell Kubelet what to do, the latter vice versa. + // - It is the expectation of the Kubelet that these fields are static for the lifecycle of the Kubelet. + // The Kubelet will not re-request the RuntimeConfiguration after startup, and CRI implementations should + // avoid updating them without a full node reboot. + RuntimeConfig(ctx context.Context, in *RuntimeConfigRequest, opts ...grpc.CallOption) (*RuntimeConfigResponse, error) +} + +type runtimeServiceClient struct { + cc *grpc.ClientConn +} + +func NewRuntimeServiceClient(cc *grpc.ClientConn) RuntimeServiceClient { + return &runtimeServiceClient{cc} +} + +func (c *runtimeServiceClient) Version(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*VersionResponse, error) { + out := new(VersionResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) RunPodSandbox(ctx context.Context, in *RunPodSandboxRequest, opts ...grpc.CallOption) (*RunPodSandboxResponse, error) { + out := new(RunPodSandboxResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/RunPodSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) StopPodSandbox(ctx context.Context, in *StopPodSandboxRequest, opts ...grpc.CallOption) (*StopPodSandboxResponse, error) { + out := new(StopPodSandboxResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/StopPodSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) RemovePodSandbox(ctx context.Context, in *RemovePodSandboxRequest, opts ...grpc.CallOption) (*RemovePodSandboxResponse, error) { + out := new(RemovePodSandboxResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/RemovePodSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) PodSandboxStatus(ctx context.Context, in *PodSandboxStatusRequest, opts ...grpc.CallOption) (*PodSandboxStatusResponse, error) { + out := new(PodSandboxStatusResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/PodSandboxStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) ListPodSandbox(ctx context.Context, in *ListPodSandboxRequest, opts ...grpc.CallOption) (*ListPodSandboxResponse, error) { + out := new(ListPodSandboxResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/ListPodSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { + out := new(CreateContainerResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/CreateContainer", in, out, opts...) 
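+ // Editor's note: every unary method on runtimeServiceClient follows this
+ // exact shape: allocate the response message, then let grpc.ClientConn.Invoke
+ // marshal the request, perform the round trip, and unmarshal into it. A
+ // minimal usage sketch (illustration only; the socket path is an assumption
+ // and varies by runtime):
+ //
+ //	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock",
+ //		grpc.WithTransportCredentials(insecure.NewCredentials()))
+ //	if err != nil {
+ //		return err
+ //	}
+ //	defer conn.Close()
+ //	client := NewRuntimeServiceClient(conn)
+ //	vr, err := client.Version(ctx, &VersionRequest{})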
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) StartContainer(ctx context.Context, in *StartContainerRequest, opts ...grpc.CallOption) (*StartContainerResponse, error) { + out := new(StartContainerResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/StartContainer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) StopContainer(ctx context.Context, in *StopContainerRequest, opts ...grpc.CallOption) (*StopContainerResponse, error) { + out := new(StopContainerResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/StopContainer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) RemoveContainer(ctx context.Context, in *RemoveContainerRequest, opts ...grpc.CallOption) (*RemoveContainerResponse, error) { + out := new(RemoveContainerResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/RemoveContainer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) ListContainers(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) { + out := new(ListContainersResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/ListContainers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) ContainerStatus(ctx context.Context, in *ContainerStatusRequest, opts ...grpc.CallOption) (*ContainerStatusResponse, error) { + out := new(ContainerStatusResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/ContainerStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) UpdateContainerResources(ctx context.Context, in *UpdateContainerResourcesRequest, opts ...grpc.CallOption) (*UpdateContainerResourcesResponse, error) { + out := new(UpdateContainerResourcesResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/UpdateContainerResources", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) ReopenContainerLog(ctx context.Context, in *ReopenContainerLogRequest, opts ...grpc.CallOption) (*ReopenContainerLogResponse, error) { + out := new(ReopenContainerLogResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/ReopenContainerLog", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) ExecSync(ctx context.Context, in *ExecSyncRequest, opts ...grpc.CallOption) (*ExecSyncResponse, error) { + out := new(ExecSyncResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/ExecSync", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) Exec(ctx context.Context, in *ExecRequest, opts ...grpc.CallOption) (*ExecResponse, error) { + out := new(ExecResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/Exec", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) Attach(ctx context.Context, in *AttachRequest, opts ...grpc.CallOption) (*AttachResponse, error) { + out := new(AttachResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/Attach", in, out, opts...) 
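+ // Editor's note: Exec, Attach, and PortForward are deliberately unary RPCs.
+ // The response carries the URL of a separate streaming server hosted by the
+ // runtime, and the actual stdin/stdout/port traffic flows over that endpoint
+ // rather than over this gRPC connection; only ExecSync runs the command and
+ // returns its output inline in the response.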
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) PortForward(ctx context.Context, in *PortForwardRequest, opts ...grpc.CallOption) (*PortForwardResponse, error) { + out := new(PortForwardResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/PortForward", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) ContainerStats(ctx context.Context, in *ContainerStatsRequest, opts ...grpc.CallOption) (*ContainerStatsResponse, error) { + out := new(ContainerStatsResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/ContainerStats", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) ListContainerStats(ctx context.Context, in *ListContainerStatsRequest, opts ...grpc.CallOption) (*ListContainerStatsResponse, error) { + out := new(ListContainerStatsResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/ListContainerStats", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) PodSandboxStats(ctx context.Context, in *PodSandboxStatsRequest, opts ...grpc.CallOption) (*PodSandboxStatsResponse, error) { + out := new(PodSandboxStatsResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/PodSandboxStats", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) ListPodSandboxStats(ctx context.Context, in *ListPodSandboxStatsRequest, opts ...grpc.CallOption) (*ListPodSandboxStatsResponse, error) { + out := new(ListPodSandboxStatsResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/ListPodSandboxStats", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) UpdateRuntimeConfig(ctx context.Context, in *UpdateRuntimeConfigRequest, opts ...grpc.CallOption) (*UpdateRuntimeConfigResponse, error) { + out := new(UpdateRuntimeConfigResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/UpdateRuntimeConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) CheckpointContainer(ctx context.Context, in *CheckpointContainerRequest, opts ...grpc.CallOption) (*CheckpointContainerResponse, error) { + out := new(CheckpointContainerResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/CheckpointContainer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) GetContainerEvents(ctx context.Context, in *GetEventsRequest, opts ...grpc.CallOption) (RuntimeService_GetContainerEventsClient, error) { + stream, err := c.cc.NewStream(ctx, &_RuntimeService_serviceDesc.Streams[0], "/runtime.v1.RuntimeService/GetContainerEvents", opts...) 
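+ // Editor's note: GetContainerEvents is the one server-streaming method on
+ // RuntimeService. As the generated code below shows, the stub opens the
+ // stream, sends the single GetEventsRequest, half-closes the send side, and
+ // returns a Recv-only wrapper. A consuming sketch (illustration only;
+ // handleEvent is a hypothetical callback):
+ //
+ //	stream, err := client.GetContainerEvents(ctx, &GetEventsRequest{})
+ //	if err != nil {
+ //		return err
+ //	}
+ //	for {
+ //		ev, err := stream.Recv() // ev is a *ContainerEventResponse
+ //		if err == io.EOF {
+ //			break
+ //		}
+ //		if err != nil {
+ //			return err
+ //		}
+ //		handleEvent(ev)
+ //	}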
+ if err != nil { + return nil, err + } + x := &runtimeServiceGetContainerEventsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type RuntimeService_GetContainerEventsClient interface { + Recv() (*ContainerEventResponse, error) + grpc.ClientStream +} + +type runtimeServiceGetContainerEventsClient struct { + grpc.ClientStream +} + +func (x *runtimeServiceGetContainerEventsClient) Recv() (*ContainerEventResponse, error) { + m := new(ContainerEventResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *runtimeServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) { + out := new(ListMetricDescriptorsResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/ListMetricDescriptors", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) ListPodSandboxMetrics(ctx context.Context, in *ListPodSandboxMetricsRequest, opts ...grpc.CallOption) (*ListPodSandboxMetricsResponse, error) { + out := new(ListPodSandboxMetricsResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/ListPodSandboxMetrics", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *runtimeServiceClient) RuntimeConfig(ctx context.Context, in *RuntimeConfigRequest, opts ...grpc.CallOption) (*RuntimeConfigResponse, error) { + out := new(RuntimeConfigResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.RuntimeService/RuntimeConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RuntimeServiceServer is the server API for RuntimeService service. +type RuntimeServiceServer interface { + // Version returns the runtime name, runtime version, and runtime API version. + Version(context.Context, *VersionRequest) (*VersionResponse, error) + // RunPodSandbox creates and starts a pod-level sandbox. Runtimes must ensure + // the sandbox is in the ready state on success. + RunPodSandbox(context.Context, *RunPodSandboxRequest) (*RunPodSandboxResponse, error) + // StopPodSandbox stops any running process that is part of the sandbox and + // reclaims network resources (e.g., IP addresses) allocated to the sandbox. + // If there are any running containers in the sandbox, they must be forcibly + // terminated. + // This call is idempotent, and must not return an error if all relevant + // resources have already been reclaimed. kubelet will call StopPodSandbox + // at least once before calling RemovePodSandbox. It will also attempt to + // reclaim resources eagerly, as soon as a sandbox is not needed. Hence, + // multiple StopPodSandbox calls are expected. + StopPodSandbox(context.Context, *StopPodSandboxRequest) (*StopPodSandboxResponse, error) + // RemovePodSandbox removes the sandbox. If there are any running containers + // in the sandbox, they must be forcibly terminated and removed. + // This call is idempotent, and must not return an error if the sandbox has + // already been removed. + RemovePodSandbox(context.Context, *RemovePodSandboxRequest) (*RemovePodSandboxResponse, error) + // PodSandboxStatus returns the status of the PodSandbox. If the PodSandbox is not + // present, returns an error. 
+ PodSandboxStatus(context.Context, *PodSandboxStatusRequest) (*PodSandboxStatusResponse, error) + // ListPodSandbox returns a list of PodSandboxes. + ListPodSandbox(context.Context, *ListPodSandboxRequest) (*ListPodSandboxResponse, error) + // CreateContainer creates a new container in specified PodSandbox + CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) + // StartContainer starts the container. + StartContainer(context.Context, *StartContainerRequest) (*StartContainerResponse, error) + // StopContainer stops a running container with a grace period (i.e., timeout). + // This call is idempotent, and must not return an error if the container has + // already been stopped. + // The runtime must forcibly kill the container after the grace period is + // reached. + StopContainer(context.Context, *StopContainerRequest) (*StopContainerResponse, error) + // RemoveContainer removes the container. If the container is running, the + // container must be forcibly removed. + // This call is idempotent, and must not return an error if the container has + // already been removed. + RemoveContainer(context.Context, *RemoveContainerRequest) (*RemoveContainerResponse, error) + // ListContainers lists all containers by filters. + ListContainers(context.Context, *ListContainersRequest) (*ListContainersResponse, error) + // ContainerStatus returns status of the container. If the container is not + // present, returns an error. + ContainerStatus(context.Context, *ContainerStatusRequest) (*ContainerStatusResponse, error) + // UpdateContainerResources updates ContainerConfig of the container synchronously. + // If runtime fails to transactionally update the requested resources, an error is returned. + UpdateContainerResources(context.Context, *UpdateContainerResourcesRequest) (*UpdateContainerResourcesResponse, error) + // ReopenContainerLog asks runtime to reopen the stdout/stderr log file + // for the container. This is often called after the log file has been + // rotated. If the container is not running, container runtime can choose + // to either create a new log file and return nil, or return an error. + // Once it returns error, new container log file MUST NOT be created. + ReopenContainerLog(context.Context, *ReopenContainerLogRequest) (*ReopenContainerLogResponse, error) + // ExecSync runs a command in a container synchronously. + ExecSync(context.Context, *ExecSyncRequest) (*ExecSyncResponse, error) + // Exec prepares a streaming endpoint to execute a command in the container. + Exec(context.Context, *ExecRequest) (*ExecResponse, error) + // Attach prepares a streaming endpoint to attach to a running container. + Attach(context.Context, *AttachRequest) (*AttachResponse, error) + // PortForward prepares a streaming endpoint to forward ports from a PodSandbox. + PortForward(context.Context, *PortForwardRequest) (*PortForwardResponse, error) + // ContainerStats returns stats of the container. If the container does not + // exist, the call returns an error. + ContainerStats(context.Context, *ContainerStatsRequest) (*ContainerStatsResponse, error) + // ListContainerStats returns stats of all running containers. + ListContainerStats(context.Context, *ListContainerStatsRequest) (*ListContainerStatsResponse, error) + // PodSandboxStats returns stats of the pod sandbox. If the pod sandbox does not + // exist, the call returns an error. 
+ PodSandboxStats(context.Context, *PodSandboxStatsRequest) (*PodSandboxStatsResponse, error) + // ListPodSandboxStats returns stats of the pod sandboxes matching a filter. + ListPodSandboxStats(context.Context, *ListPodSandboxStatsRequest) (*ListPodSandboxStatsResponse, error) + // UpdateRuntimeConfig updates the runtime configuration based on the given request. + UpdateRuntimeConfig(context.Context, *UpdateRuntimeConfigRequest) (*UpdateRuntimeConfigResponse, error) + // Status returns the status of the runtime. + Status(context.Context, *StatusRequest) (*StatusResponse, error) + // CheckpointContainer checkpoints a container + CheckpointContainer(context.Context, *CheckpointContainerRequest) (*CheckpointContainerResponse, error) + // GetContainerEvents gets container events from the CRI runtime + GetContainerEvents(*GetEventsRequest, RuntimeService_GetContainerEventsServer) error + // ListMetricDescriptors gets the descriptors for the metrics that will be returned in ListPodSandboxMetrics. + // This list should be static at startup: either the client and server restart together when + // adding or removing metrics descriptors, or they should not change. + // Put differently, if ListPodSandboxMetrics references a name that is not described in the initial + // ListMetricDescriptors call, then the metric will not be broadcasted. + ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) + // ListPodSandboxMetrics gets pod sandbox metrics from CRI Runtime + ListPodSandboxMetrics(context.Context, *ListPodSandboxMetricsRequest) (*ListPodSandboxMetricsResponse, error) + // RuntimeConfig returns configuration information of the runtime. + // A couple of notes: + // - The RuntimeConfigRequest object is not to be confused with the contents of UpdateRuntimeConfigRequest. + // The former is for having runtime tell Kubelet what to do, the latter vice versa. + // - It is the expectation of the Kubelet that these fields are static for the lifecycle of the Kubelet. + // The Kubelet will not re-request the RuntimeConfiguration after startup, and CRI implementations should + // avoid updating them without a full node reboot. + RuntimeConfig(context.Context, *RuntimeConfigRequest) (*RuntimeConfigResponse, error) +} + +// UnimplementedRuntimeServiceServer can be embedded to have forward compatible implementations. 
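+// Embedding it means a server only implements the methods it actually
+// supports; every other RPC fails with codes.Unimplemented instead of
+// breaking the build when methods are added to RuntimeServiceServer. A
+// minimal sketch (editorial illustration; myRuntime is a hypothetical type):
+//
+//	type myRuntime struct {
+//		UnimplementedRuntimeServiceServer
+//	}
+//
+//	func (s *myRuntime) Version(ctx context.Context, req *VersionRequest) (*VersionResponse, error) {
+//		return &VersionResponse{RuntimeName: "my-runtime", RuntimeApiVersion: "v1"}, nil
+//	}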
+type UnimplementedRuntimeServiceServer struct { +} + +func (*UnimplementedRuntimeServiceServer) Version(ctx context.Context, req *VersionRequest) (*VersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} +func (*UnimplementedRuntimeServiceServer) RunPodSandbox(ctx context.Context, req *RunPodSandboxRequest) (*RunPodSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RunPodSandbox not implemented") +} +func (*UnimplementedRuntimeServiceServer) StopPodSandbox(ctx context.Context, req *StopPodSandboxRequest) (*StopPodSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopPodSandbox not implemented") +} +func (*UnimplementedRuntimeServiceServer) RemovePodSandbox(ctx context.Context, req *RemovePodSandboxRequest) (*RemovePodSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemovePodSandbox not implemented") +} +func (*UnimplementedRuntimeServiceServer) PodSandboxStatus(ctx context.Context, req *PodSandboxStatusRequest) (*PodSandboxStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PodSandboxStatus not implemented") +} +func (*UnimplementedRuntimeServiceServer) ListPodSandbox(ctx context.Context, req *ListPodSandboxRequest) (*ListPodSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListPodSandbox not implemented") +} +func (*UnimplementedRuntimeServiceServer) CreateContainer(ctx context.Context, req *CreateContainerRequest) (*CreateContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateContainer not implemented") +} +func (*UnimplementedRuntimeServiceServer) StartContainer(ctx context.Context, req *StartContainerRequest) (*StartContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartContainer not implemented") +} +func (*UnimplementedRuntimeServiceServer) StopContainer(ctx context.Context, req *StopContainerRequest) (*StopContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopContainer not implemented") +} +func (*UnimplementedRuntimeServiceServer) RemoveContainer(ctx context.Context, req *RemoveContainerRequest) (*RemoveContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveContainer not implemented") +} +func (*UnimplementedRuntimeServiceServer) ListContainers(ctx context.Context, req *ListContainersRequest) (*ListContainersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListContainers not implemented") +} +func (*UnimplementedRuntimeServiceServer) ContainerStatus(ctx context.Context, req *ContainerStatusRequest) (*ContainerStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ContainerStatus not implemented") +} +func (*UnimplementedRuntimeServiceServer) UpdateContainerResources(ctx context.Context, req *UpdateContainerResourcesRequest) (*UpdateContainerResourcesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateContainerResources not implemented") +} +func (*UnimplementedRuntimeServiceServer) ReopenContainerLog(ctx context.Context, req *ReopenContainerLogRequest) (*ReopenContainerLogResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReopenContainerLog not implemented") +} +func (*UnimplementedRuntimeServiceServer) ExecSync(ctx context.Context, req *ExecSyncRequest) (*ExecSyncResponse, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method ExecSync not implemented") +} +func (*UnimplementedRuntimeServiceServer) Exec(ctx context.Context, req *ExecRequest) (*ExecResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Exec not implemented") +} +func (*UnimplementedRuntimeServiceServer) Attach(ctx context.Context, req *AttachRequest) (*AttachResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Attach not implemented") +} +func (*UnimplementedRuntimeServiceServer) PortForward(ctx context.Context, req *PortForwardRequest) (*PortForwardResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PortForward not implemented") +} +func (*UnimplementedRuntimeServiceServer) ContainerStats(ctx context.Context, req *ContainerStatsRequest) (*ContainerStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ContainerStats not implemented") +} +func (*UnimplementedRuntimeServiceServer) ListContainerStats(ctx context.Context, req *ListContainerStatsRequest) (*ListContainerStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListContainerStats not implemented") +} +func (*UnimplementedRuntimeServiceServer) PodSandboxStats(ctx context.Context, req *PodSandboxStatsRequest) (*PodSandboxStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PodSandboxStats not implemented") +} +func (*UnimplementedRuntimeServiceServer) ListPodSandboxStats(ctx context.Context, req *ListPodSandboxStatsRequest) (*ListPodSandboxStatsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListPodSandboxStats not implemented") +} +func (*UnimplementedRuntimeServiceServer) UpdateRuntimeConfig(ctx context.Context, req *UpdateRuntimeConfigRequest) (*UpdateRuntimeConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateRuntimeConfig not implemented") +} +func (*UnimplementedRuntimeServiceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (*UnimplementedRuntimeServiceServer) CheckpointContainer(ctx context.Context, req *CheckpointContainerRequest) (*CheckpointContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckpointContainer not implemented") +} +func (*UnimplementedRuntimeServiceServer) GetContainerEvents(req *GetEventsRequest, srv RuntimeService_GetContainerEventsServer) error { + return status.Errorf(codes.Unimplemented, "method GetContainerEvents not implemented") +} +func (*UnimplementedRuntimeServiceServer) ListMetricDescriptors(ctx context.Context, req *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListMetricDescriptors not implemented") +} +func (*UnimplementedRuntimeServiceServer) ListPodSandboxMetrics(ctx context.Context, req *ListPodSandboxMetricsRequest) (*ListPodSandboxMetricsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListPodSandboxMetrics not implemented") +} +func (*UnimplementedRuntimeServiceServer) RuntimeConfig(ctx context.Context, req *RuntimeConfigRequest) (*RuntimeConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RuntimeConfig not implemented") +} + +func RegisterRuntimeServiceServer(s *grpc.Server, srv RuntimeServiceServer) { + s.RegisterService(&_RuntimeService_serviceDesc, srv) +} + +func 
_RuntimeService_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(VersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).Version(ctx, req.(*VersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_RunPodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RunPodSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).RunPodSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/RunPodSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).RunPodSandbox(ctx, req.(*RunPodSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_StopPodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopPodSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).StopPodSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/StopPodSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).StopPodSandbox(ctx, req.(*StopPodSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_RemovePodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemovePodSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).RemovePodSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/RemovePodSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).RemovePodSandbox(ctx, req.(*RemovePodSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_PodSandboxStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PodSandboxStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).PodSandboxStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/PodSandboxStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).PodSandboxStatus(ctx, req.(*PodSandboxStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_ListPodSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPodSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).ListPodSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/ListPodSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ListPodSandbox(ctx, req.(*ListPodSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).CreateContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/CreateContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).CreateContainer(ctx, req.(*CreateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_StartContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).StartContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/StartContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).StartContainer(ctx, req.(*StartContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_StopContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).StopContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/StopContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).StopContainer(ctx, req.(*StopContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_RemoveContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).RemoveContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/RemoveContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).RemoveContainer(ctx, req.(*RemoveContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_ListContainers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListContainersRequest) + if err := dec(in); err 
!= nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).ListContainers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/ListContainers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ListContainers(ctx, req.(*ListContainersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_ContainerStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ContainerStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).ContainerStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/ContainerStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ContainerStatus(ctx, req.(*ContainerStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_UpdateContainerResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateContainerResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).UpdateContainerResources(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/UpdateContainerResources", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).UpdateContainerResources(ctx, req.(*UpdateContainerResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_ReopenContainerLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReopenContainerLogRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).ReopenContainerLog(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/ReopenContainerLog", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ReopenContainerLog(ctx, req.(*ReopenContainerLogRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_ExecSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecSyncRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).ExecSync(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/ExecSync", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ExecSync(ctx, req.(*ExecSyncRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_Exec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(RuntimeServiceServer).Exec(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/Exec", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).Exec(ctx, req.(*ExecRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_Attach_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AttachRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).Attach(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/Attach", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).Attach(ctx, req.(*AttachRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_PortForward_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PortForwardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).PortForward(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/PortForward", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).PortForward(ctx, req.(*PortForwardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_ContainerStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ContainerStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).ContainerStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/ContainerStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ContainerStats(ctx, req.(*ContainerStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_ListContainerStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListContainerStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).ListContainerStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/ListContainerStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ListContainerStats(ctx, req.(*ListContainerStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_PodSandboxStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PodSandboxStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).PodSandboxStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/PodSandboxStats", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).PodSandboxStats(ctx, req.(*PodSandboxStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_ListPodSandboxStats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPodSandboxStatsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).ListPodSandboxStats(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/ListPodSandboxStats", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ListPodSandboxStats(ctx, req.(*ListPodSandboxStatsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_UpdateRuntimeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateRuntimeConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).UpdateRuntimeConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/UpdateRuntimeConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).UpdateRuntimeConfig(ctx, req.(*UpdateRuntimeConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_CheckpointContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckpointContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).CheckpointContainer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/CheckpointContainer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).CheckpointContainer(ctx, req.(*CheckpointContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_GetContainerEvents_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(GetEventsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(RuntimeServiceServer).GetContainerEvents(m, &runtimeServiceGetContainerEventsServer{stream}) +} + +type RuntimeService_GetContainerEventsServer interface { + Send(*ContainerEventResponse) error + grpc.ServerStream +} + +type runtimeServiceGetContainerEventsServer struct { + grpc.ServerStream +} + +func (x *runtimeServiceGetContainerEventsServer) Send(m 
*ContainerEventResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _RuntimeService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMetricDescriptorsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).ListMetricDescriptors(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/ListMetricDescriptors", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_ListPodSandboxMetrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPodSandboxMetricsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).ListPodSandboxMetrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/ListPodSandboxMetrics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).ListPodSandboxMetrics(ctx, req.(*ListPodSandboxMetricsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_RuntimeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RuntimeConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).RuntimeConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.RuntimeService/RuntimeConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).RuntimeConfig(ctx, req.(*RuntimeConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _RuntimeService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "runtime.v1.RuntimeService", + HandlerType: (*RuntimeServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _RuntimeService_Version_Handler, + }, + { + MethodName: "RunPodSandbox", + Handler: _RuntimeService_RunPodSandbox_Handler, + }, + { + MethodName: "StopPodSandbox", + Handler: _RuntimeService_StopPodSandbox_Handler, + }, + { + MethodName: "RemovePodSandbox", + Handler: _RuntimeService_RemovePodSandbox_Handler, + }, + { + MethodName: "PodSandboxStatus", + Handler: _RuntimeService_PodSandboxStatus_Handler, + }, + { + MethodName: "ListPodSandbox", + Handler: _RuntimeService_ListPodSandbox_Handler, + }, + { + MethodName: "CreateContainer", + Handler: _RuntimeService_CreateContainer_Handler, + }, + { + MethodName: "StartContainer", + Handler: _RuntimeService_StartContainer_Handler, + }, + { + MethodName: "StopContainer", + Handler: _RuntimeService_StopContainer_Handler, + }, + { + MethodName: "RemoveContainer", + Handler: _RuntimeService_RemoveContainer_Handler, + }, + { + MethodName: "ListContainers", + Handler: _RuntimeService_ListContainers_Handler, + }, + { + MethodName: "ContainerStatus", + Handler: _RuntimeService_ContainerStatus_Handler, + }, + { + MethodName: "UpdateContainerResources", + Handler: 
_RuntimeService_UpdateContainerResources_Handler, + }, + { + MethodName: "ReopenContainerLog", + Handler: _RuntimeService_ReopenContainerLog_Handler, + }, + { + MethodName: "ExecSync", + Handler: _RuntimeService_ExecSync_Handler, + }, + { + MethodName: "Exec", + Handler: _RuntimeService_Exec_Handler, + }, + { + MethodName: "Attach", + Handler: _RuntimeService_Attach_Handler, + }, + { + MethodName: "PortForward", + Handler: _RuntimeService_PortForward_Handler, + }, + { + MethodName: "ContainerStats", + Handler: _RuntimeService_ContainerStats_Handler, + }, + { + MethodName: "ListContainerStats", + Handler: _RuntimeService_ListContainerStats_Handler, + }, + { + MethodName: "PodSandboxStats", + Handler: _RuntimeService_PodSandboxStats_Handler, + }, + { + MethodName: "ListPodSandboxStats", + Handler: _RuntimeService_ListPodSandboxStats_Handler, + }, + { + MethodName: "UpdateRuntimeConfig", + Handler: _RuntimeService_UpdateRuntimeConfig_Handler, + }, + { + MethodName: "Status", + Handler: _RuntimeService_Status_Handler, + }, + { + MethodName: "CheckpointContainer", + Handler: _RuntimeService_CheckpointContainer_Handler, + }, + { + MethodName: "ListMetricDescriptors", + Handler: _RuntimeService_ListMetricDescriptors_Handler, + }, + { + MethodName: "ListPodSandboxMetrics", + Handler: _RuntimeService_ListPodSandboxMetrics_Handler, + }, + { + MethodName: "RuntimeConfig", + Handler: _RuntimeService_RuntimeConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "GetContainerEvents", + Handler: _RuntimeService_GetContainerEvents_Handler, + ServerStreams: true, + }, + }, + Metadata: "api.proto", +} + +// ImageServiceClient is the client API for ImageService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ImageServiceClient interface { + // ListImages lists existing images. + ListImages(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) + // ImageStatus returns the status of the image. If the image is not + // present, returns a response with ImageStatusResponse.Image set to + // nil. + ImageStatus(ctx context.Context, in *ImageStatusRequest, opts ...grpc.CallOption) (*ImageStatusResponse, error) + // PullImage pulls an image with authentication config. + PullImage(ctx context.Context, in *PullImageRequest, opts ...grpc.CallOption) (*PullImageResponse, error) + // RemoveImage removes the image. + // This call is idempotent, and must not return an error if the image has + // already been removed. + RemoveImage(ctx context.Context, in *RemoveImageRequest, opts ...grpc.CallOption) (*RemoveImageResponse, error) + // ImageFSInfo returns information of the filesystem that is used to store images. + ImageFsInfo(ctx context.Context, in *ImageFsInfoRequest, opts ...grpc.CallOption) (*ImageFsInfoResponse, error) +} + +type imageServiceClient struct { + cc *grpc.ClientConn +} + +func NewImageServiceClient(cc *grpc.ClientConn) ImageServiceClient { + return &imageServiceClient{cc} +} + +func (c *imageServiceClient) ListImages(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) { + out := new(ListImagesResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.ImageService/ListImages", in, out, opts...) 
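+ // Editor's note: ImageService is conventionally served on the same socket as
+ // RuntimeService, so a single ClientConn can back both stubs (illustration
+ // only):
+ //
+ //	images := NewImageServiceClient(conn) // same conn as NewRuntimeServiceClient
+ //	list, err := images.ListImages(ctx, &ListImagesRequest{})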
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *imageServiceClient) ImageStatus(ctx context.Context, in *ImageStatusRequest, opts ...grpc.CallOption) (*ImageStatusResponse, error) { + out := new(ImageStatusResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.ImageService/ImageStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imageServiceClient) PullImage(ctx context.Context, in *PullImageRequest, opts ...grpc.CallOption) (*PullImageResponse, error) { + out := new(PullImageResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.ImageService/PullImage", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imageServiceClient) RemoveImage(ctx context.Context, in *RemoveImageRequest, opts ...grpc.CallOption) (*RemoveImageResponse, error) { + out := new(RemoveImageResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.ImageService/RemoveImage", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imageServiceClient) ImageFsInfo(ctx context.Context, in *ImageFsInfoRequest, opts ...grpc.CallOption) (*ImageFsInfoResponse, error) { + out := new(ImageFsInfoResponse) + err := c.cc.Invoke(ctx, "/runtime.v1.ImageService/ImageFsInfo", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ImageServiceServer is the server API for ImageService service. +type ImageServiceServer interface { + // ListImages lists existing images. + ListImages(context.Context, *ListImagesRequest) (*ListImagesResponse, error) + // ImageStatus returns the status of the image. If the image is not + // present, returns a response with ImageStatusResponse.Image set to + // nil. + ImageStatus(context.Context, *ImageStatusRequest) (*ImageStatusResponse, error) + // PullImage pulls an image with authentication config. + PullImage(context.Context, *PullImageRequest) (*PullImageResponse, error) + // RemoveImage removes the image. + // This call is idempotent, and must not return an error if the image has + // already been removed. + RemoveImage(context.Context, *RemoveImageRequest) (*RemoveImageResponse, error) + // ImageFSInfo returns information of the filesystem that is used to store images. + ImageFsInfo(context.Context, *ImageFsInfoRequest) (*ImageFsInfoResponse, error) +} + +// UnimplementedImageServiceServer can be embedded to have forward compatible implementations. 
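+// It plays the same role for ImageService that UnimplementedRuntimeServiceServer
+// plays above. A complete CRI implementation registers both services on one
+// grpc.Server (sketch; the listener path and server types are hypothetical):
+//
+//	srv := grpc.NewServer()
+//	RegisterRuntimeServiceServer(srv, &myRuntime{})
+//	RegisterImageServiceServer(srv, &myImages{})
+//	lis, err := net.Listen("unix", "/run/my-cri.sock")
+//	if err != nil {
+//		return err
+//	}
+//	return srv.Serve(lis)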
+type UnimplementedImageServiceServer struct { +} + +func (*UnimplementedImageServiceServer) ListImages(ctx context.Context, req *ListImagesRequest) (*ListImagesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListImages not implemented") +} +func (*UnimplementedImageServiceServer) ImageStatus(ctx context.Context, req *ImageStatusRequest) (*ImageStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImageStatus not implemented") +} +func (*UnimplementedImageServiceServer) PullImage(ctx context.Context, req *PullImageRequest) (*PullImageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PullImage not implemented") +} +func (*UnimplementedImageServiceServer) RemoveImage(ctx context.Context, req *RemoveImageRequest) (*RemoveImageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveImage not implemented") +} +func (*UnimplementedImageServiceServer) ImageFsInfo(ctx context.Context, req *ImageFsInfoRequest) (*ImageFsInfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImageFsInfo not implemented") +} + +func RegisterImageServiceServer(s *grpc.Server, srv ImageServiceServer) { + s.RegisterService(&_ImageService_serviceDesc, srv) +} + +func _ImageService_ListImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListImagesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageServiceServer).ListImages(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.ImageService/ListImages", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).ListImages(ctx, req.(*ListImagesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ImageService_ImageStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImageStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageServiceServer).ImageStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.ImageService/ImageStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).ImageStatus(ctx, req.(*ImageStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ImageService_PullImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PullImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageServiceServer).PullImage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.ImageService/PullImage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).PullImage(ctx, req.(*PullImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ImageService_RemoveImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(ImageServiceServer).RemoveImage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.ImageService/RemoveImage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).RemoveImage(ctx, req.(*RemoveImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ImageService_ImageFsInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImageFsInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImageServiceServer).ImageFsInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/runtime.v1.ImageService/ImageFsInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImageServiceServer).ImageFsInfo(ctx, req.(*ImageFsInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ImageService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "runtime.v1.ImageService", + HandlerType: (*ImageServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListImages", + Handler: _ImageService_ListImages_Handler, + }, + { + MethodName: "ImageStatus", + Handler: _ImageService_ImageStatus_Handler, + }, + { + MethodName: "PullImage", + Handler: _ImageService_PullImage_Handler, + }, + { + MethodName: "RemoveImage", + Handler: _ImageService_RemoveImage_Handler, + }, + { + MethodName: "ImageFsInfo", + Handler: _ImageService_ImageFsInfo_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api.proto", +} + +func (m *VersionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VersionRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VersionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintApi(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VersionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VersionResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VersionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RuntimeApiVersion) > 0 { + i -= len(m.RuntimeApiVersion) + copy(dAtA[i:], m.RuntimeApiVersion) + i = encodeVarintApi(dAtA, i, uint64(len(m.RuntimeApiVersion))) + i-- + dAtA[i] = 0x22 + } + if len(m.RuntimeVersion) > 0 { + i -= len(m.RuntimeVersion) + copy(dAtA[i:], m.RuntimeVersion) + i = encodeVarintApi(dAtA, i, uint64(len(m.RuntimeVersion))) + i-- + dAtA[i] = 0x1a + } + if len(m.RuntimeName) > 0 { + i -= len(m.RuntimeName) + copy(dAtA[i:], m.RuntimeName) + i = encodeVarintApi(dAtA, i, uint64(len(m.RuntimeName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintApi(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0xa + } + return 
+	return len(dAtA) - i, nil
+}
+
+func (m *DNSConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DNSConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DNSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Options) > 0 {
+		for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Options[iNdEx])
+			copy(dAtA[i:], m.Options[iNdEx])
+			i = encodeVarintApi(dAtA, i, uint64(len(m.Options[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Searches) > 0 {
+		for iNdEx := len(m.Searches) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Searches[iNdEx])
+			copy(dAtA[i:], m.Searches[iNdEx])
+			i = encodeVarintApi(dAtA, i, uint64(len(m.Searches[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Servers) > 0 {
+		for iNdEx := len(m.Servers) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Servers[iNdEx])
+			copy(dAtA[i:], m.Servers[iNdEx])
+			i = encodeVarintApi(dAtA, i, uint64(len(m.Servers[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PortMapping) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PortMapping) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PortMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.HostIp) > 0 {
+		i -= len(m.HostIp)
+		copy(dAtA[i:], m.HostIp)
+		i = encodeVarintApi(dAtA, i, uint64(len(m.HostIp)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.HostPort != 0 {
+		i = encodeVarintApi(dAtA, i, uint64(m.HostPort))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.ContainerPort != 0 {
+		i = encodeVarintApi(dAtA, i, uint64(m.ContainerPort))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Protocol != 0 {
+		i = encodeVarintApi(dAtA, i, uint64(m.Protocol))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Mount) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Mount) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.GidMappings) > 0 {
+		for iNdEx := len(m.GidMappings) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.GidMappings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintApi(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if len(m.UidMappings) > 0 {
+		for iNdEx := len(m.UidMappings) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.UidMappings[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintApi(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if m.Propagation != 0 {
+		i = encodeVarintApi(dAtA, i, uint64(m.Propagation))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.SelinuxRelabel {
+		i--
+		if m.SelinuxRelabel {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Readonly {
+		i--
+		if m.Readonly {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.HostPath) > 0 {
+		i -= len(m.HostPath)
+		copy(dAtA[i:], m.HostPath)
+		i = encodeVarintApi(dAtA, i, uint64(len(m.HostPath)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.ContainerPath) > 0 {
+		i -= len(m.ContainerPath)
+		copy(dAtA[i:], m.ContainerPath)
+		i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerPath)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *IDMapping) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *IDMapping) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IDMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Length != 0 {
+		i = encodeVarintApi(dAtA, i, uint64(m.Length))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.ContainerId != 0 {
+		i = encodeVarintApi(dAtA, i, uint64(m.ContainerId))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.HostId != 0 {
+		i = encodeVarintApi(dAtA, i, uint64(m.HostId))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *UserNamespace) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UserNamespace) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UserNamespace) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Gids) > 0 {
+		for iNdEx := len(m.Gids) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Gids[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintApi(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Uids) > 0 {
+		for iNdEx := len(m.Uids) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Uids[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintApi(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Mode != 0 {
+		i = encodeVarintApi(dAtA, i, uint64(m.Mode))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NamespaceOption) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamespaceOption) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamespaceOption) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.UsernsOptions != nil {
+		{
+			size, err := m.UsernsOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintApi(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if len(m.TargetId) > 0 {
+		i -= len(m.TargetId)
+		copy(dAtA[i:], m.TargetId)
+		i = encodeVarintApi(dAtA, i, uint64(len(m.TargetId)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Ipc != 0 {
+		i = encodeVarintApi(dAtA, i, uint64(m.Ipc))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.Pid != 0 {
+		i = encodeVarintApi(dAtA, i, uint64(m.Pid))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Network != 0 {
+		i = encodeVarintApi(dAtA, i, uint64(m.Network))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m
*Int64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Int64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LinuxSandboxSecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxSandboxSecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxSandboxSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Apparmor != nil { + { + size, err := m.Apparmor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.Seccomp != nil { + { + size, err := m.Seccomp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.SeccompProfilePath) > 0 { + i -= len(m.SeccompProfilePath) + copy(dAtA[i:], m.SeccompProfilePath) + i = encodeVarintApi(dAtA, i, uint64(len(m.SeccompProfilePath))) + i-- + dAtA[i] = 0x3a + } + if m.Privileged { + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.SupplementalGroups) > 0 { + dAtA6 := make([]byte, len(m.SupplementalGroups)*10) + var j5 int + for _, num1 := range m.SupplementalGroups { + num := uint64(num1) + for num >= 1<<7 { + dAtA6[j5] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j5++ + } + dAtA6[j5] = uint8(num) + j5++ + } + i -= j5 + copy(dAtA[i:], dAtA6[:j5]) + i = encodeVarintApi(dAtA, i, uint64(j5)) + i-- + dAtA[i] = 0x2a + } + if m.ReadonlyRootfs { + i-- + if m.ReadonlyRootfs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.RunAsUser != nil { + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.SelinuxOptions != nil { + { + size, err := m.SelinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.NamespaceOptions != nil { + { + size, err := m.NamespaceOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecurityProfile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecurityProfile) MarshalTo(dAtA 
[]byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecurityProfile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LocalhostRef) > 0 { + i -= len(m.LocalhostRef) + copy(dAtA[i:], m.LocalhostRef) + i = encodeVarintApi(dAtA, i, uint64(len(m.LocalhostRef))) + i-- + dAtA[i] = 0x12 + } + if m.ProfileType != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.ProfileType)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *LinuxPodSandboxConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxPodSandboxConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxPodSandboxConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Sysctls) > 0 { + for k := range m.Sysctls { + v := m.Sysctls[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.CgroupParent) > 0 { + i -= len(m.CgroupParent) + copy(dAtA[i:], m.CgroupParent) + i = encodeVarintApi(dAtA, i, uint64(len(m.CgroupParent))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Attempt != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Attempt)) + i-- + dAtA[i] = 0x20 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintApi(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + } + if len(m.Uid) > 0 { + i -= len(m.Uid) + copy(dAtA[i:], m.Uid) + i = encodeVarintApi(dAtA, i, uint64(len(m.Uid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *PodSandboxConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Windows != nil { + { + size, err := m.Windows.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.Linux != nil { + { + size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.PortMappings) > 0 { + for iNdEx := len(m.PortMappings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PortMappings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.DnsConfig != nil { + { + size, err := m.DnsConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.LogDirectory) > 0 { + i -= len(m.LogDirectory) + copy(dAtA[i:], m.LogDirectory) + i = encodeVarintApi(dAtA, i, uint64(len(m.LogDirectory))) + i-- + dAtA[i] = 0x1a + } + if len(m.Hostname) > 0 { + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintApi(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x12 + } + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RunPodSandboxRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunPodSandboxRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunPodSandboxRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RuntimeHandler) > 0 { + i -= len(m.RuntimeHandler) + copy(dAtA[i:], m.RuntimeHandler) + i = encodeVarintApi(dAtA, i, uint64(len(m.RuntimeHandler))) + i-- + dAtA[i] = 0x12 + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RunPodSandboxResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() 
+ dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunPodSandboxResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunPodSandboxResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StopPodSandboxRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopPodSandboxRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopPodSandboxRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StopPodSandboxResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopPodSandboxResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopPodSandboxResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RemovePodSandboxRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemovePodSandboxRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemovePodSandboxRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemovePodSandboxResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemovePodSandboxResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemovePodSandboxResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *PodSandboxStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxStatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxStatusRequest) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Verbose { + i-- + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodIP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ip) > 0 { + i -= len(m.Ip) + copy(dAtA[i:], m.Ip) + i = encodeVarintApi(dAtA, i, uint64(len(m.Ip))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxNetworkStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxNetworkStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxNetworkStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AdditionalIps) > 0 { + for iNdEx := len(m.AdditionalIps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AdditionalIps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Ip) > 0 { + i -= len(m.Ip) + copy(dAtA[i:], m.Ip) + i = encodeVarintApi(dAtA, i, uint64(len(m.Ip))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Namespace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Options != nil { + { + size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *LinuxPodSandboxStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxPodSandboxStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxPodSandboxStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Namespaces != nil { + { + size, err := m.Namespaces.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RuntimeHandler) > 0 { + i -= len(m.RuntimeHandler) + copy(dAtA[i:], m.RuntimeHandler) + i = encodeVarintApi(dAtA, i, uint64(len(m.RuntimeHandler))) + i-- + dAtA[i] = 0x4a + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x42 + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if m.Linux != nil { + { + size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Network != nil { + { + size, err := m.Network.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.CreatedAt != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + if m.State != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x18 + } + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x20 + } + if len(m.ContainersStatuses) > 0 { + for iNdEx := len(m.ContainersStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ContainersStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Info) > 0 { + for k := range m.Info { + v := m.Info[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = 
encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxStateValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxStateValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxStateValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.State != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LabelSelector) > 0 { + for k := range m.LabelSelector { + v := m.LabelSelector[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if m.State != nil { + { + size, err := m.State.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListPodSandboxRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPodSandboxRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListPodSandboxRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSandbox) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandbox) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandbox) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RuntimeHandler) > 0 { + i -= len(m.RuntimeHandler) + copy(dAtA[i:], m.RuntimeHandler) + i = encodeVarintApi(dAtA, i, 
uint64(len(m.RuntimeHandler))) + i-- + dAtA[i] = 0x3a + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if m.CreatedAt != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + if m.State != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x18 + } + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListPodSandboxResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPodSandboxResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListPodSandboxResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxStatsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxStatsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxStatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxStatsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Stats != nil { + { + size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxStatsFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxStatsFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxStatsFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LabelSelector) > 0 { + for k := range m.LabelSelector { + v := m.LabelSelector[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListPodSandboxStatsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPodSandboxStatsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListPodSandboxStatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListPodSandboxStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPodSandboxStatsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListPodSandboxStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stats[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i 
= encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Windows != nil { + { + size, err := m.Windows.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Linux != nil { + { + size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Attributes != nil { + { + size, err := m.Attributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxPodSandboxStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxPodSandboxStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxPodSandboxStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Process != nil { + { + size, err := m.Process.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Network != nil { + { + size, err := m.Network.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Cpu != nil { + { + size, err := m.Cpu.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WindowsPodSandboxStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsPodSandboxStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsPodSandboxStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.Process != nil { + { + size, err := m.Process.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Network != nil { + { + size, err := m.Network.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Cpu != nil { + { + size, err := m.Cpu.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Interfaces) > 0 { + for iNdEx := len(m.Interfaces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Interfaces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.DefaultInterface != nil { + { + size, err := m.DefaultInterface.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WindowsNetworkUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsNetworkUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsNetworkUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Interfaces) > 0 { + for iNdEx := len(m.Interfaces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Interfaces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.DefaultInterface != nil { + { + size, err := m.DefaultInterface.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *NetworkInterfaceUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkInterfaceUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkInterfaceUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TxErrors != nil { + { + size, err := m.TxErrors.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.TxBytes != nil { + { + size, err := m.TxBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.RxErrors != nil { + { + size, err := m.RxErrors.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.RxBytes != nil { + { + size, err := m.RxBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WindowsNetworkInterfaceUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsNetworkInterfaceUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsNetworkInterfaceUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TxPacketsDropped != nil { + { + size, err := m.TxPacketsDropped.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.TxBytes != nil { + { + size, err := m.TxBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.RxPacketsDropped != nil { + { + size, err := m.RxPacketsDropped.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.RxBytes != nil { + { + size, err := m.RxBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*ProcessUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProcessUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProcessUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProcessCount != nil { + { + size, err := m.ProcessCount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WindowsProcessUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsProcessUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsProcessUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProcessCount != nil { + { + size, err := m.ProcessCount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ImageSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RuntimeHandler) > 0 { + i -= len(m.RuntimeHandler) + copy(dAtA[i:], m.RuntimeHandler) + i = encodeVarintApi(dAtA, i, uint64(len(m.RuntimeHandler))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if len(m.UserSpecifiedImage) > 0 { + i -= len(m.UserSpecifiedImage) + copy(dAtA[i:], m.UserSpecifiedImage) + i = encodeVarintApi(dAtA, i, uint64(len(m.UserSpecifiedImage))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Image) > 0 { + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintApi(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *KeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintApi(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintApi(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxContainerResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxContainerResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxContainerResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MemorySwapLimitInBytes != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.MemorySwapLimitInBytes)) + i-- + dAtA[i] = 0x50 + } + if len(m.Unified) > 0 { + for k := range m.Unified { + v := m.Unified[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x4a + } + } + if len(m.HugepageLimits) > 0 { + for iNdEx := len(m.HugepageLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HugepageLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.CpusetMems) > 0 { + i -= len(m.CpusetMems) + copy(dAtA[i:], m.CpusetMems) + i = encodeVarintApi(dAtA, i, uint64(len(m.CpusetMems))) + i-- + dAtA[i] = 0x3a + } + if len(m.CpusetCpus) > 0 { + i -= len(m.CpusetCpus) + copy(dAtA[i:], m.CpusetCpus) + i = encodeVarintApi(dAtA, i, uint64(len(m.CpusetCpus))) + i-- + dAtA[i] = 0x32 + } + if m.OomScoreAdj != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.OomScoreAdj)) + i-- + dAtA[i] = 0x28 + } + if m.MemoryLimitInBytes != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.MemoryLimitInBytes)) + i-- + dAtA[i] = 0x20 + } + if m.CpuShares != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CpuShares)) + i-- + dAtA[i] = 0x18 + } + if m.CpuQuota != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CpuQuota)) + i-- + dAtA[i] = 0x10 + } + if m.CpuPeriod != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CpuPeriod)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HugepageLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HugepageLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HugepageLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Limit != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x10 + } + if len(m.PageSize) > 0 { + i -= len(m.PageSize) + copy(dAtA[i:], m.PageSize) + i = encodeVarintApi(dAtA, i, uint64(len(m.PageSize))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SELinuxOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *SELinuxOption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Level) > 0 { + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintApi(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0x22 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintApi(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x1a + } + if len(m.Role) > 0 { + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintApi(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0x12 + } + if len(m.User) > 0 { + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintApi(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Capability) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Capability) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Capability) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AddAmbientCapabilities) > 0 { + for iNdEx := len(m.AddAmbientCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AddAmbientCapabilities[iNdEx]) + copy(dAtA[i:], m.AddAmbientCapabilities[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.AddAmbientCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DropCapabilities) > 0 { + for iNdEx := len(m.DropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DropCapabilities[iNdEx]) + copy(dAtA[i:], m.DropCapabilities[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.DropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.AddCapabilities) > 0 { + for iNdEx := len(m.AddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AddCapabilities[iNdEx]) + copy(dAtA[i:], m.AddCapabilities[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.AddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LinuxContainerSecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxContainerSecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxContainerSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Apparmor != nil { + { + size, err := m.Apparmor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Seccomp != nil { + { + size, err := m.Seccomp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if len(m.ReadonlyPaths) > 0 { + for iNdEx := len(m.ReadonlyPaths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ReadonlyPaths[iNdEx]) + copy(dAtA[i:], m.ReadonlyPaths[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.ReadonlyPaths[iNdEx]))) + i-- + dAtA[i] = 0x72 + } + } + if len(m.MaskedPaths) > 0 { + for iNdEx := 
len(m.MaskedPaths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MaskedPaths[iNdEx]) + copy(dAtA[i:], m.MaskedPaths[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.MaskedPaths[iNdEx]))) + i-- + dAtA[i] = 0x6a + } + } + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.NoNewPrivs { + i-- + if m.NoNewPrivs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + } + if len(m.SeccompProfilePath) > 0 { + i -= len(m.SeccompProfilePath) + copy(dAtA[i:], m.SeccompProfilePath) + i = encodeVarintApi(dAtA, i, uint64(len(m.SeccompProfilePath))) + i-- + dAtA[i] = 0x52 + } + if len(m.ApparmorProfile) > 0 { + i -= len(m.ApparmorProfile) + copy(dAtA[i:], m.ApparmorProfile) + i = encodeVarintApi(dAtA, i, uint64(len(m.ApparmorProfile))) + i-- + dAtA[i] = 0x4a + } + if len(m.SupplementalGroups) > 0 { + dAtA57 := make([]byte, len(m.SupplementalGroups)*10) + var j56 int + for _, num1 := range m.SupplementalGroups { + num := uint64(num1) + for num >= 1<<7 { + dAtA57[j56] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j56++ + } + dAtA57[j56] = uint8(num) + j56++ + } + i -= j56 + copy(dAtA[i:], dAtA57[:j56]) + i = encodeVarintApi(dAtA, i, uint64(j56)) + i-- + dAtA[i] = 0x42 + } + if m.ReadonlyRootfs { + i-- + if m.ReadonlyRootfs { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if len(m.RunAsUsername) > 0 { + i -= len(m.RunAsUsername) + copy(dAtA[i:], m.RunAsUsername) + i = encodeVarintApi(dAtA, i, uint64(len(m.RunAsUsername))) + i-- + dAtA[i] = 0x32 + } + if m.RunAsUser != nil { + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.SelinuxOptions != nil { + { + size, err := m.SelinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.NamespaceOptions != nil { + { + size, err := m.NamespaceOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Privileged { + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Capabilities != nil { + { + size, err := m.Capabilities.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxContainerConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxContainerConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxContainerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WindowsNamespaceOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsNamespaceOption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsNamespaceOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Network != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Network)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WindowsSandboxSecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsSandboxSecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsSandboxSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NamespaceOptions != nil { + { + size, err := m.NamespaceOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.HostProcess { + i-- + if m.HostProcess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.CredentialSpec) > 0 { + i -= len(m.CredentialSpec) + copy(dAtA[i:], m.CredentialSpec) + i = encodeVarintApi(dAtA, i, uint64(len(m.CredentialSpec))) + i-- + dAtA[i] = 0x12 + } + if len(m.RunAsUsername) > 0 { + i -= len(m.RunAsUsername) + copy(dAtA[i:], m.RunAsUsername) + i = encodeVarintApi(dAtA, i, uint64(len(m.RunAsUsername))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WindowsPodSandboxConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsPodSandboxConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsPodSandboxConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WindowsContainerSecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsContainerSecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsContainerSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HostProcess { + i-- + if m.HostProcess { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.CredentialSpec) > 0 { + i -= len(m.CredentialSpec) + copy(dAtA[i:], m.CredentialSpec) + i = encodeVarintApi(dAtA, 
i, uint64(len(m.CredentialSpec))) + i-- + dAtA[i] = 0x12 + } + if len(m.RunAsUsername) > 0 { + i -= len(m.RunAsUsername) + copy(dAtA[i:], m.RunAsUsername) + i = encodeVarintApi(dAtA, i, uint64(len(m.RunAsUsername))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WindowsContainerConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsContainerConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsContainerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WindowsContainerResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsContainerResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsContainerResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RootfsSizeInBytes != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.RootfsSizeInBytes)) + i-- + dAtA[i] = 0x28 + } + if m.MemoryLimitInBytes != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.MemoryLimitInBytes)) + i-- + dAtA[i] = 0x20 + } + if m.CpuMaximum != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CpuMaximum)) + i-- + dAtA[i] = 0x18 + } + if m.CpuCount != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CpuCount)) + i-- + dAtA[i] = 0x10 + } + if m.CpuShares != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CpuShares)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ContainerMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Attempt != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Attempt)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Device) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Device) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Device) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int 
+ _ = l + if len(m.Permissions) > 0 { + i -= len(m.Permissions) + copy(dAtA[i:], m.Permissions) + i = encodeVarintApi(dAtA, i, uint64(len(m.Permissions))) + i-- + dAtA[i] = 0x1a + } + if len(m.HostPath) > 0 { + i -= len(m.HostPath) + copy(dAtA[i:], m.HostPath) + i = encodeVarintApi(dAtA, i, uint64(len(m.HostPath))) + i-- + dAtA[i] = 0x12 + } + if len(m.ContainerPath) > 0 { + i -= len(m.ContainerPath) + copy(dAtA[i:], m.ContainerPath) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerPath))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CDIDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CDIDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CDIDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ContainerConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.CDIDevices) > 0 { + for iNdEx := len(m.CDIDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CDIDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if m.Windows != nil { + { + size, err := m.Windows.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Linux != nil { + { + size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.Tty { + i-- + if m.Tty { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } + if m.StdinOnce { + i-- + if m.StdinOnce { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + } + if m.Stdin { + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + } + if len(m.LogPath) > 0 { + i -= len(m.LogPath) + copy(dAtA[i:], m.LogPath) + i = encodeVarintApi(dAtA, i, uint64(len(m.LogPath))) + i-- + dAtA[i] = 0x5a + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, 
uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x4a + } + } + if len(m.Devices) > 0 { + for iNdEx := len(m.Devices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Devices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.Mounts) > 0 { + for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.Envs) > 0 { + for iNdEx := len(m.Envs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Envs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.WorkingDir) > 0 { + i -= len(m.WorkingDir) + copy(dAtA[i:], m.WorkingDir) + i = encodeVarintApi(dAtA, i, uint64(len(m.WorkingDir))) + i-- + dAtA[i] = 0x2a + } + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CreateContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateContainerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SandboxConfig != nil { + { + size, err := m.SandboxConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CreateContainerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateContainerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
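+// The generated marshalers in this file all share one gogo/protobuf
+// pattern: MarshalToSizedBuffer fills dAtA from the end toward the
+// front, so fields are written in descending field-number order,
+// payload first and key byte last. Each key byte is
+// (field_number << 3) | wire_type; for example 0xa is field 1 with
+// wire type 2 (length-delimited) and 0x10 is field 2 with wire type 0
+// (varint). A minimal sketch of the same backward-writing technique,
+// using a hypothetical message with a single string field and a
+// payload assumed shorter than 128 bytes so its length varint fits in
+// one byte (illustration only, not part of the generated API):
+//
+//	func marshalName(name string) []byte {
+//		buf := make([]byte, 2+len(name)) // key byte + 1-byte length + payload
+//		i := len(buf)
+//		i -= len(name)
+//		copy(buf[i:], name) // payload is written first
+//		i--
+//		buf[i] = byte(len(name)) // varint-encoded length; single byte here
+//		i--
+//		buf[i] = 0xa // key: field 1, wire type 2
+//		return buf[i:]
+//	}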
+func (m *CreateContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartContainerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StartContainerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StartContainerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StartContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *StopContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopContainerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timeout != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timeout)) + i-- + dAtA[i] = 0x10 + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StopContainerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StopContainerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StopContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RemoveContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveContainerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, 
uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveContainerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveContainerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *ContainerStateValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStateValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.State != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ContainerFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LabelSelector) > 0 { + for k := range m.LabelSelector { + v := m.LabelSelector[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + dAtA[i] = 0x1a + } + if m.State != nil { + { + size, err := m.State.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListContainersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListContainersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListContainersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Container) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Container) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x4a + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x42 + } + } + if m.CreatedAt != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x38 + } + if m.State != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x30 + } + if len(m.ImageRef) > 0 { + i -= len(m.ImageRef) + copy(dAtA[i:], m.ImageRef) + i = encodeVarintApi(dAtA, i, uint64(len(m.ImageRef))) + i-- + dAtA[i] = 0x2a + } + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListContainersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListContainersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListContainersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ContainerStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = 
l + if m.Verbose { + i-- + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Resources != nil { + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if len(m.LogPath) > 0 { + i -= len(m.LogPath) + copy(dAtA[i:], m.LogPath) + i = encodeVarintApi(dAtA, i, uint64(len(m.LogPath))) + i-- + dAtA[i] = 0x7a + } + if len(m.Mounts) > 0 { + for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + } + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintApi(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x5a + } + if len(m.Reason) > 0 { + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintApi(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x52 + } + if len(m.ImageRef) > 0 { + i -= len(m.ImageRef) + copy(dAtA[i:], m.ImageRef) + i = encodeVarintApi(dAtA, i, uint64(len(m.ImageRef))) + i-- + dAtA[i] = 0x4a + } + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.ExitCode != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.ExitCode)) + i-- + dAtA[i] = 0x38 + } + if m.FinishedAt != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.FinishedAt)) + i-- + dAtA[i] = 0x30 + } + if m.StartedAt != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.StartedAt)) + i-- + dAtA[i] = 0x28 + } + if m.CreatedAt != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + if m.State != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x18 + } + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i 
= encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ContainerStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Info) > 0 { + for k := range m.Info { + v := m.Info[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ContainerResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Windows != nil { + { + size, err := m.Windows.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Linux != nil { + { + size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateContainerResourcesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateContainerResourcesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateContainerResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.Windows != nil { + { + size, err := m.Windows.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Linux != nil { + { + size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateContainerResourcesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateContainerResourcesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateContainerResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *ExecSyncRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecSyncRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecSyncRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timeout != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timeout)) + i-- + dAtA[i] = 0x18 + } + if len(m.Cmd) > 0 { + for iNdEx := len(m.Cmd) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cmd[iNdEx]) + copy(dAtA[i:], m.Cmd[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.Cmd[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecSyncResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecSyncResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecSyncResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExitCode != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.ExitCode)) + i-- + dAtA[i] = 0x18 + } + if len(m.Stderr) > 0 { + i -= len(m.Stderr) + copy(dAtA[i:], m.Stderr) + i = encodeVarintApi(dAtA, i, uint64(len(m.Stderr))) + i-- + dAtA[i] = 0x12 + } + if len(m.Stdout) > 0 { + i -= len(m.Stdout) + copy(dAtA[i:], m.Stdout) + i = encodeVarintApi(dAtA, i, uint64(len(m.Stdout))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Stderr { + i-- + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.Stdout { + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Stdin { + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + 
dAtA[i] = 0x20 + } + if m.Tty { + i-- + if m.Tty { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Cmd) > 0 { + for iNdEx := len(m.Cmd) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Cmd[iNdEx]) + copy(dAtA[i:], m.Cmd[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.Cmd[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarintApi(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AttachRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttachRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Stderr { + i-- + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Stdout { + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.Tty { + i-- + if m.Tty { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Stdin { + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AttachResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttachResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarintApi(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PortForwardRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortForwardRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortForwardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Port) > 0 { + dAtA87 := make([]byte, len(m.Port)*10) + var j86 int + for _, num1 := range m.Port { 
+ num := uint64(num1) + for num >= 1<<7 { + dAtA87[j86] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j86++ + } + dAtA87[j86] = uint8(num) + j86++ + } + i -= j86 + copy(dAtA[i:], dAtA87[:j86]) + i = encodeVarintApi(dAtA, i, uint64(j86)) + i-- + dAtA[i] = 0x12 + } + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PortForwardResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortForwardResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortForwardResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarintApi(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ImageFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListImagesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListImagesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListImagesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Image) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Image) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Image) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pinned { + i-- + if m.Pinned { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + } + if m.Spec != nil { + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintApi(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x32 + } + if m.Uid != nil { + { + size, err := 
m.Uid.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Size_ != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x20 + } + if len(m.RepoDigests) > 0 { + for iNdEx := len(m.RepoDigests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RepoDigests[iNdEx]) + copy(dAtA[i:], m.RepoDigests[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.RepoDigests[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.RepoTags) > 0 { + for iNdEx := len(m.RepoTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RepoTags[iNdEx]) + copy(dAtA[i:], m.RepoTags[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.RepoTags[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListImagesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListImagesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListImagesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ImageStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Verbose { + i-- + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ImageStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Info) > 0 { + for k := range m.Info { + v := m.Info[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuthConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuthConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuthConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RegistryToken) > 0 { + i -= len(m.RegistryToken) + copy(dAtA[i:], m.RegistryToken) + i = encodeVarintApi(dAtA, i, uint64(len(m.RegistryToken))) + i-- + dAtA[i] = 0x32 + } + if len(m.IdentityToken) > 0 { + i -= len(m.IdentityToken) + copy(dAtA[i:], m.IdentityToken) + i = encodeVarintApi(dAtA, i, uint64(len(m.IdentityToken))) + i-- + dAtA[i] = 0x2a + } + if len(m.ServerAddress) > 0 { + i -= len(m.ServerAddress) + copy(dAtA[i:], m.ServerAddress) + i = encodeVarintApi(dAtA, i, uint64(len(m.ServerAddress))) + i-- + dAtA[i] = 0x22 + } + if len(m.Auth) > 0 { + i -= len(m.Auth) + copy(dAtA[i:], m.Auth) + i = encodeVarintApi(dAtA, i, uint64(len(m.Auth))) + i-- + dAtA[i] = 0x1a + } + if len(m.Password) > 0 { + i -= len(m.Password) + copy(dAtA[i:], m.Password) + i = encodeVarintApi(dAtA, i, uint64(len(m.Password))) + i-- + dAtA[i] = 0x12 + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintApi(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PullImageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PullImageRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PullImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SandboxConfig != nil { + { + size, err := m.SandboxConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Auth != nil { + { + size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PullImageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PullImageResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PullImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ImageRef) > 0 { + i -= len(m.ImageRef) + copy(dAtA[i:], m.ImageRef) + i = encodeVarintApi(dAtA, i, uint64(len(m.ImageRef))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveImageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA 
= make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveImageRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveImageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveImageResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *NetworkConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodCidr) > 0 { + i -= len(m.PodCidr) + copy(dAtA[i:], m.PodCidr) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodCidr))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NetworkConfig != nil { + { + size, err := m.NetworkConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateRuntimeConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateRuntimeConfigRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateRuntimeConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RuntimeConfig != nil { + { + size, err := m.RuntimeConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *UpdateRuntimeConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *UpdateRuntimeConfigResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateRuntimeConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RuntimeCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintApi(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + } + if len(m.Reason) > 0 { + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintApi(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + } + if m.Status { + i-- + if m.Status { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintApi(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Verbose { + i-- + if m.Verbose { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Info) > 0 { + for k := range m.Info { + v := m.Info[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, 
i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ImageFsInfoRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageFsInfoRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageFsInfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *UInt64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UInt64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FilesystemIdentifier) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilesystemIdentifier) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FilesystemIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Mountpoint) > 0 { + i -= len(m.Mountpoint) + copy(dAtA[i:], m.Mountpoint) + i = encodeVarintApi(dAtA, i, uint64(len(m.Mountpoint))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FilesystemUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FilesystemUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FilesystemUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InodesUsed != nil { + { + size, err := m.InodesUsed.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.UsedBytes != nil { + { + size, err := m.UsedBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.FsId != nil { + { + size, err := m.FsId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WindowsFilesystemUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsFilesystemUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsFilesystemUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UsedBytes != nil { + { + size, err := m.UsedBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.FsId != nil { + { + size, err := m.FsId.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ImageFsInfoResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageFsInfoResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageFsInfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ContainerFilesystems) > 0 { + for iNdEx := len(m.ContainerFilesystems) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ContainerFilesystems[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.ImageFilesystems) > 0 { + for iNdEx := len(m.ImageFilesystems) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImageFilesystems[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ContainerStatsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ContainerStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Stats != nil { + { + size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
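
The Marshal/MarshalTo/MarshalToSizedBuffer trio above repeats verbatim for every message: Marshal allocates exactly Size() bytes, and MarshalToSizedBuffer fills that buffer from the back toward the front, so a nested message's varint length prefix can be written only after the payload's true size is known, and each field's tag byte is laid down last via i--. The function returns len(dAtA) - i, the byte count actually written. A minimal self-contained sketch of the same back-to-front technique (hand-written for illustration; sovExample, encodeVarint, and marshalName are hypothetical stand-ins, not the vendored helpers):

package main

import "fmt"

// sovExample returns the number of bytes needed to varint-encode v,
// mirroring the shape of the generated sov* helpers.
func sovExample(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// encodeVarint writes v ending just before offset and returns the new
// start position, matching the shape of the generated encodeVarint* helpers.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sovExample(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

// marshalName writes a single string field (field number 1, wire type 2,
// hence tag 0xa) back to front: payload first, then its varint length,
// then the tag byte.
func marshalName(name string) []byte {
	size := 1 + sovExample(uint64(len(name))) + len(name)
	dAtA := make([]byte, size)
	i := len(dAtA)
	i -= len(name)
	copy(dAtA[i:], name)
	i = encodeVarint(dAtA, i, uint64(len(name)))
	i--
	dAtA[i] = 0xa
	return dAtA[i:]
}

func main() {
	fmt.Printf("%% x: % x\n", marshalName("cri")) // 0a 03 63 72 69
}

Writing in reverse trades a second sizing pass per nested message for a single up-front Size() call, which is why the generated Size() methods later in this file mirror the marshalers term for term.
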
*ListContainerStatsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListContainerStatsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListContainerStatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ContainerStatsFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatsFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStatsFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LabelSelector) > 0 { + for k := range m.LabelSelector { + v := m.LabelSelector[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListContainerStatsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListContainerStatsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListContainerStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Stats) > 0 { + for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Stats[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ContainerAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Annotations) > 0 { + for k := range m.Annotations { + v := m.Annotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = 
encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Labels) > 0 { + for k := range m.Labels { + v := m.Labels[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintApi(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintApi(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ContainerStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Swap != nil { + { + size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.WritableLayer != nil { + { + size, err := m.WritableLayer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Cpu != nil { + { + size, err := m.Cpu.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Attributes != nil { + { + size, err := m.Attributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WindowsContainerStats) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsContainerStats) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsContainerStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.WritableLayer != nil { + { + size, err := m.WritableLayer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Cpu != nil { + { + size, err := m.Cpu.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Attributes != nil { + { + size, err := m.Attributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CpuUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CpuUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CpuUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UsageNanoCores != nil { + { + size, err := m.UsageNanoCores.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.UsageCoreNanoSeconds != nil { + { + size, err := m.UsageCoreNanoSeconds.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WindowsCpuUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsCpuUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsCpuUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UsageNanoCores != nil { + { + size, err := m.UsageNanoCores.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.UsageCoreNanoSeconds != nil { + { + size, err := m.UsageCoreNanoSeconds.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MemoryUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MemoryUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MemoryUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MajorPageFaults != nil { + { + size, err := m.MajorPageFaults.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.PageFaults != nil { + { + size, err := m.PageFaults.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.RssBytes != nil { + { + size, err := m.RssBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if 
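
The hard-coded bytes sprinkled through these marshalers (0xa, 0x12, 0x1a, 0x22, 0x8, 0x10, ...) are protobuf field keys, computed as (field_number << 3) | wire_type: wire type 2 marks a length-delimited field, wire type 0 a varint. A quick sketch that reproduces the constants seen here, for illustration only:

package main

import "fmt"

// key computes a single-byte protobuf field key for small field numbers:
// (field_number << 3) | wire_type.
func key(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

func main() {
	fmt.Printf("%#x\n", key(1, 2)) // 0xa: field 1, length-delimited
	fmt.Printf("%#x\n", key(3, 2)) // 0x1a: field 3, length-delimited
	fmt.Printf("%#x\n", key(1, 0)) // 0x8: field 1, varint (e.g. Timestamp)
	fmt.Printf("%#x\n", key(2, 0)) // 0x10: field 2, varint (e.g. a bool)
}
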
m.UsageBytes != nil { + { + size, err := m.UsageBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AvailableBytes != nil { + { + size, err := m.AvailableBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.WorkingSetBytes != nil { + { + size, err := m.WorkingSetBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SwapUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SwapUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SwapUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SwapUsageBytes != nil { + { + size, err := m.SwapUsageBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.SwapAvailableBytes != nil { + { + size, err := m.SwapAvailableBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *WindowsMemoryUsage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsMemoryUsage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsMemoryUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CommitMemoryBytes != nil { + { + size, err := m.CommitMemoryBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.PageFaults != nil { + { + size, err := m.PageFaults.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AvailableBytes != nil { + { + size, err := m.AvailableBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.WorkingSetBytes != nil { + { + size, err := m.WorkingSetBytes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReopenContainerLogRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != 
nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReopenContainerLogRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReopenContainerLogRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ReopenContainerLogResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReopenContainerLogResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReopenContainerLogResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *CheckpointContainerRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckpointContainerRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CheckpointContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timeout != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timeout)) + i-- + dAtA[i] = 0x18 + } + if len(m.Location) > 0 { + i -= len(m.Location) + copy(dAtA[i:], m.Location) + i = encodeVarintApi(dAtA, i, uint64(len(m.Location))) + i-- + dAtA[i] = 0x12 + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CheckpointContainerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CheckpointContainerResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CheckpointContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *GetEventsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetEventsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetEventsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *ContainerEventResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerEventResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerEventResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ContainersStatuses) > 0 { + for iNdEx := len(m.ContainersStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ContainersStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.PodSandboxStatus != nil { + { + size, err := m.PodSandboxStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.CreatedAt != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x18 + } + if m.ContainerEventType != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.ContainerEventType)) + i-- + dAtA[i] = 0x10 + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ListMetricDescriptorsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListMetricDescriptorsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListMetricDescriptorsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *ListMetricDescriptorsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListMetricDescriptorsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListMetricDescriptorsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Descriptors) > 0 { + for iNdEx := len(m.Descriptors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Descriptors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MetricDescriptor) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricDescriptor) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricDescriptor) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.LabelKeys) > 0 { + for iNdEx := len(m.LabelKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelKeys[iNdEx]) + copy(dAtA[i:], m.LabelKeys[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.LabelKeys[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Help) > 0 { + i -= len(m.Help) + copy(dAtA[i:], m.Help) + i = encodeVarintApi(dAtA, i, uint64(len(m.Help))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
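
Repeated fields (Conditions, Descriptors, LabelKeys, and the rest) are walked with iNdEx running from len-1 down to 0: because the buffer fills from the back, reverse iteration leaves the elements in ascending order on the wire, which is the order a decoder appending to a slice will reconstruct. A tiny sketch of that inversion, with hypothetical helper names:

package main

import "fmt"

func sov(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

func putVarint(b []byte, off int, v uint64) int {
	off -= sov(v)
	base := off
	for v >= 1<<7 {
		b[off] = uint8(v&0x7f | 0x80)
		v >>= 7
		off++
	}
	b[off] = uint8(v)
	return base
}

// marshalRepeated writes each element of vals as field 1 (tag 0xa),
// iterating back to front so the wire order matches the slice order.
func marshalRepeated(vals []string) []byte {
	size := 0
	for _, s := range vals {
		size += 1 + sov(uint64(len(s))) + len(s)
	}
	b := make([]byte, size)
	i := len(b)
	for iNdEx := len(vals) - 1; iNdEx >= 0; iNdEx-- {
		i -= len(vals[iNdEx])
		copy(b[i:], vals[iNdEx])
		i = putVarint(b, i, uint64(len(vals[iNdEx])))
		i--
		b[i] = 0xa
	}
	return b[i:]
}

func main() {
	fmt.Printf("% x\n", marshalRepeated([]string{"a", "b"})) // 0a 01 61 0a 01 62
}
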
*ListPodSandboxMetricsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPodSandboxMetricsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListPodSandboxMetricsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *ListPodSandboxMetricsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListPodSandboxMetricsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListPodSandboxMetricsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodMetrics) > 0 { + for iNdEx := len(m.PodMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PodMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PodSandboxMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSandboxMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSandboxMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ContainerMetrics) > 0 { + for iNdEx := len(m.ContainerMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ContainerMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.PodSandboxId) > 0 { + i -= len(m.PodSandboxId) + copy(dAtA[i:], m.PodSandboxId) + i = encodeVarintApi(dAtA, i, uint64(len(m.PodSandboxId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ContainerMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.ContainerId) > 0 { + i -= len(m.ContainerId) + copy(dAtA[i:], m.ContainerId) + i = 
encodeVarintApi(dAtA, i, uint64(len(m.ContainerId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.LabelValues) > 0 { + for iNdEx := len(m.LabelValues) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LabelValues[iNdEx]) + copy(dAtA[i:], m.LabelValues[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.LabelValues[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.MetricType != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.MetricType)) + i-- + dAtA[i] = 0x18 + } + if m.Timestamp != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RuntimeConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeConfigRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *RuntimeConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeConfigResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Linux != nil { + { + size, err := m.Linux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LinuxRuntimeConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LinuxRuntimeConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LinuxRuntimeConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CgroupDriver != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.CgroupDriver)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + offset -= sovApi(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + 
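
encodeVarintApi, defined at this point in the file, is the one low-level primitive every marshaler funnels through: it steps offset back by the encoded width, then emits v in base-128 groups, least-significant seven bits first, setting the 0x80 continuation bit on every byte except the last. A round-trip sketch (the decoder is hand-written for illustration and is not part of the vendored file):

package main

import "fmt"

// putUvarint mirrors the encoding loop: 7 bits per byte, low groups first,
// 0x80 set on all but the final byte.
func putUvarint(b []byte, v uint64) int {
	n := 0
	for v >= 1<<7 {
		b[n] = uint8(v&0x7f | 0x80)
		v >>= 7
		n++
	}
	b[n] = uint8(v)
	return n + 1
}

// uvarint is a minimal matching decoder, for illustration only.
func uvarint(b []byte) (uint64, int) {
	var v uint64
	var shift uint
	for n, c := range b {
		if c < 0x80 {
			return v | uint64(c)<<shift, n + 1
		}
		v |= uint64(c&0x7f) << shift
		shift += 7
	}
	return 0, 0
}

func main() {
	b := make([]byte, 10)
	n := putUvarint(b, 300)
	v, _ := uvarint(b[:n])
	fmt.Printf("% x -> %d\n", b[:n], v) // ac 02 -> 300
}
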
return base +} +func (m *VersionRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *VersionResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RuntimeName) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RuntimeVersion) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RuntimeApiVersion) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *DNSConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Servers) > 0 { + for _, s := range m.Servers { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, s := range m.Options { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *PortMapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Protocol != 0 { + n += 1 + sovApi(uint64(m.Protocol)) + } + if m.ContainerPort != 0 { + n += 1 + sovApi(uint64(m.ContainerPort)) + } + if m.HostPort != 0 { + n += 1 + sovApi(uint64(m.HostPort)) + } + l = len(m.HostIp) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *Mount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerPath) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.HostPath) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Readonly { + n += 2 + } + if m.SelinuxRelabel { + n += 2 + } + if m.Propagation != 0 { + n += 1 + sovApi(uint64(m.Propagation)) + } + if len(m.UidMappings) > 0 { + for _, e := range m.UidMappings { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.GidMappings) > 0 { + for _, e := range m.GidMappings { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *IDMapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HostId != 0 { + n += 1 + sovApi(uint64(m.HostId)) + } + if m.ContainerId != 0 { + n += 1 + sovApi(uint64(m.ContainerId)) + } + if m.Length != 0 { + n += 1 + sovApi(uint64(m.Length)) + } + return n +} + +func (m *UserNamespace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Mode != 0 { + n += 1 + sovApi(uint64(m.Mode)) + } + if len(m.Uids) > 0 { + for _, e := range m.Uids { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Gids) > 0 { + for _, e := range m.Gids { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *NamespaceOption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Network != 0 { + n += 1 + sovApi(uint64(m.Network)) + } + if m.Pid != 0 { + n += 1 + sovApi(uint64(m.Pid)) + } + if m.Ipc != 0 { + n += 1 + sovApi(uint64(m.Ipc)) + } + l = len(m.TargetId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.UsernsOptions != nil { + l = m.UsernsOptions.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *Int64Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 1 + sovApi(uint64(m.Value)) + } + return n +} + +func (m *LinuxSandboxSecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NamespaceOptions != nil { + l = m.NamespaceOptions.Size() + n += 1 + l + 
sovApi(uint64(l)) + } + if m.SelinuxOptions != nil { + l = m.SelinuxOptions.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.RunAsUser != nil { + l = m.RunAsUser.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.ReadonlyRootfs { + n += 2 + } + if len(m.SupplementalGroups) > 0 { + l = 0 + for _, e := range m.SupplementalGroups { + l += sovApi(uint64(e)) + } + n += 1 + sovApi(uint64(l)) + l + } + if m.Privileged { + n += 2 + } + l = len(m.SeccompProfilePath) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.RunAsGroup != nil { + l = m.RunAsGroup.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Seccomp != nil { + l = m.Seccomp.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Apparmor != nil { + l = m.Apparmor.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *SecurityProfile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProfileType != 0 { + n += 1 + sovApi(uint64(m.ProfileType)) + } + l = len(m.LocalhostRef) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *LinuxPodSandboxConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CgroupParent) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Sysctls) > 0 { + for k, v := range m.Sysctls { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *PodSandboxMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Uid) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Attempt != 0 { + n += 1 + sovApi(uint64(m.Attempt)) + } + return n +} + +func (m *PodSandboxConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.LogDirectory) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.DnsConfig != nil { + l = m.DnsConfig.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.PortMappings) > 0 { + for _, e := range m.PortMappings { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if m.Linux != nil { + l = m.Linux.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Windows != nil { + l = m.Windows.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *RunPodSandboxRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RuntimeHandler) + if l > 0 { + n += 1 + l + 
sovApi(uint64(l)) + } + return n +} + +func (m *RunPodSandboxResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodSandboxId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *StopPodSandboxRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodSandboxId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *StopPodSandboxResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RemovePodSandboxRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodSandboxId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *RemovePodSandboxResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *PodSandboxStatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodSandboxId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Verbose { + n += 2 + } + return n +} + +func (m *PodIP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ip) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *PodSandboxNetworkStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Ip) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.AdditionalIps) > 0 { + for _, e := range m.AdditionalIps { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *Namespace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *LinuxPodSandboxStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Namespaces != nil { + l = m.Namespaces.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *PodSandboxStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.State != 0 { + n += 1 + sovApi(uint64(m.State)) + } + if m.CreatedAt != 0 { + n += 1 + sovApi(uint64(m.CreatedAt)) + } + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Linux != nil { + l = m.Linux.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + l = len(m.RuntimeHandler) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *PodSandboxStatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Info) > 0 { + for k, v := range m.Info { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.ContainersStatuses) > 0 { + for _, e := range m.ContainersStatuses { + l = 
e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Timestamp != 0 { + n += 1 + sovApi(uint64(m.Timestamp)) + } + return n +} + +func (m *PodSandboxStateValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != 0 { + n += 1 + sovApi(uint64(m.State)) + } + return n +} + +func (m *PodSandboxFilter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.State != nil { + l = m.State.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.LabelSelector) > 0 { + for k, v := range m.LabelSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ListPodSandboxRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *PodSandbox) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.State != 0 { + n += 1 + sovApi(uint64(m.State)) + } + if m.CreatedAt != 0 { + n += 1 + sovApi(uint64(m.CreatedAt)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + l = len(m.RuntimeHandler) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ListPodSandboxResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *PodSandboxStatsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodSandboxId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *PodSandboxStatsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Stats != nil { + l = m.Stats.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *PodSandboxStatsFilter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.LabelSelector) > 0 { + for k, v := range m.LabelSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ListPodSandboxStatsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ListPodSandboxStatsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Stats) > 0 { + for _, e := range m.Stats { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *PodSandboxAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + 
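
The recurring mapEntrySize arithmetic in these Size() methods prices each map<string,string> entry as a synthetic two-field message: tag byte plus length varint plus payload for the key (field 1), the same again for the value (field 2), and then one outer tag byte plus a varint of the entry's own length. A sketch of the same accounting, with hypothetical names:

package main

import "fmt"

func sov(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

// mapEntryWireSize mirrors the generated accounting: the entry body is
// key (tag+len+bytes) plus value (tag+len+bytes); on the wire it is
// wrapped once more with the map field's own tag and the body's length.
func mapEntryWireSize(k, v string) int {
	body := 1 + len(k) + sov(uint64(len(k))) +
		1 + len(v) + sov(uint64(len(v)))
	return body + 1 + sov(uint64(body))
}

func main() {
	// "app" -> "etcd": body is 5 + 6 = 11 bytes, 13 once wrapped.
	fmt.Println(mapEntryWireSize("app", "etcd"))
}
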
l + sovApi(uint64(l)) + } + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PodSandboxStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Attributes != nil { + l = m.Attributes.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Linux != nil { + l = m.Linux.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Windows != nil { + l = m.Windows.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *LinuxPodSandboxStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cpu != nil { + l = m.Cpu.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Process != nil { + l = m.Process.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *WindowsPodSandboxStats) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Cpu != nil { + l = m.Cpu.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Process != nil { + l = m.Process.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *NetworkUsage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + sovApi(uint64(m.Timestamp)) + } + if m.DefaultInterface != nil { + l = m.DefaultInterface.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Interfaces) > 0 { + for _, e := range m.Interfaces { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *WindowsNetworkUsage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + sovApi(uint64(m.Timestamp)) + } + if m.DefaultInterface != nil { + l = m.DefaultInterface.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Interfaces) > 0 { + for _, e := range m.Interfaces { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *NetworkInterfaceUsage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.RxBytes != nil { + l = m.RxBytes.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.RxErrors != nil { + l = m.RxErrors.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.TxBytes != nil { + l = m.TxBytes.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.TxErrors != nil { + l = m.TxErrors.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *WindowsNetworkInterfaceUsage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 
1 + l + sovApi(uint64(l)) + } + if m.RxBytes != nil { + l = m.RxBytes.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.RxPacketsDropped != nil { + l = m.RxPacketsDropped.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.TxBytes != nil { + l = m.TxBytes.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.TxPacketsDropped != nil { + l = m.TxPacketsDropped.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ProcessUsage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + sovApi(uint64(m.Timestamp)) + } + if m.ProcessCount != nil { + l = m.ProcessCount.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *WindowsProcessUsage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Timestamp != 0 { + n += 1 + sovApi(uint64(m.Timestamp)) + } + if m.ProcessCount != nil { + l = m.ProcessCount.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ImageSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Image) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + l = len(m.UserSpecifiedImage) + if l > 0 { + n += 2 + l + sovApi(uint64(l)) + } + l = len(m.RuntimeHandler) + if l > 0 { + n += 2 + l + sovApi(uint64(l)) + } + return n +} + +func (m *KeyValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *LinuxContainerResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CpuPeriod != 0 { + n += 1 + sovApi(uint64(m.CpuPeriod)) + } + if m.CpuQuota != 0 { + n += 1 + sovApi(uint64(m.CpuQuota)) + } + if m.CpuShares != 0 { + n += 1 + sovApi(uint64(m.CpuShares)) + } + if m.MemoryLimitInBytes != 0 { + n += 1 + sovApi(uint64(m.MemoryLimitInBytes)) + } + if m.OomScoreAdj != 0 { + n += 1 + sovApi(uint64(m.OomScoreAdj)) + } + l = len(m.CpusetCpus) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.CpusetMems) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.HugepageLimits) > 0 { + for _, e := range m.HugepageLimits { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Unified) > 0 { + for k, v := range m.Unified { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if m.MemorySwapLimitInBytes != 0 { + n += 1 + sovApi(uint64(m.MemorySwapLimitInBytes)) + } + return n +} + +func (m *HugepageLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PageSize) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sovApi(uint64(m.Limit)) + } + return n +} + +func (m *SELinuxOption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.User) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Level) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *Capability) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ 
= l + if len(m.AddCapabilities) > 0 { + for _, s := range m.AddCapabilities { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.DropCapabilities) > 0 { + for _, s := range m.DropCapabilities { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.AddAmbientCapabilities) > 0 { + for _, s := range m.AddAmbientCapabilities { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *LinuxContainerSecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Capabilities != nil { + l = m.Capabilities.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Privileged { + n += 2 + } + if m.NamespaceOptions != nil { + l = m.NamespaceOptions.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.SelinuxOptions != nil { + l = m.SelinuxOptions.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.RunAsUser != nil { + l = m.RunAsUser.Size() + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RunAsUsername) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ReadonlyRootfs { + n += 2 + } + if len(m.SupplementalGroups) > 0 { + l = 0 + for _, e := range m.SupplementalGroups { + l += sovApi(uint64(e)) + } + n += 1 + sovApi(uint64(l)) + l + } + l = len(m.ApparmorProfile) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.SeccompProfilePath) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.NoNewPrivs { + n += 2 + } + if m.RunAsGroup != nil { + l = m.RunAsGroup.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.MaskedPaths) > 0 { + for _, s := range m.MaskedPaths { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.ReadonlyPaths) > 0 { + for _, s := range m.ReadonlyPaths { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Seccomp != nil { + l = m.Seccomp.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Apparmor != nil { + l = m.Apparmor.Size() + n += 2 + l + sovApi(uint64(l)) + } + return n +} + +func (m *LinuxContainerConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *WindowsNamespaceOption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Network != 0 { + n += 1 + sovApi(uint64(m.Network)) + } + return n +} + +func (m *WindowsSandboxSecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RunAsUsername) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.CredentialSpec) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.HostProcess { + n += 2 + } + if m.NamespaceOptions != nil { + l = m.NamespaceOptions.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *WindowsPodSandboxConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *WindowsContainerSecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RunAsUsername) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.CredentialSpec) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.HostProcess { + n += 2 + } + return n +} + +func (m *WindowsContainerConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovApi(uint64(l)) + } + 
if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *WindowsContainerResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CpuShares != 0 { + n += 1 + sovApi(uint64(m.CpuShares)) + } + if m.CpuCount != 0 { + n += 1 + sovApi(uint64(m.CpuCount)) + } + if m.CpuMaximum != 0 { + n += 1 + sovApi(uint64(m.CpuMaximum)) + } + if m.MemoryLimitInBytes != 0 { + n += 1 + sovApi(uint64(m.MemoryLimitInBytes)) + } + if m.RootfsSizeInBytes != 0 { + n += 1 + sovApi(uint64(m.RootfsSizeInBytes)) + } + return n +} + +func (m *ContainerMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Attempt != 0 { + n += 1 + sovApi(uint64(m.Attempt)) + } + return n +} + +func (m *Device) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerPath) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.HostPath) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Permissions) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *CDIDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ContainerConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + l = len(m.WorkingDir) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Envs) > 0 { + for _, e := range m.Envs { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Devices) > 0 { + for _, e := range m.Devices { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + l = len(m.LogPath) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Stdin { + n += 2 + } + if m.StdinOnce { + n += 2 + } + if m.Tty { + n += 2 + } + if m.Linux != nil { + l = m.Linux.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Windows != nil { + l = m.Windows.Size() + n += 2 + l + sovApi(uint64(l)) + } + if len(m.CDIDevices) > 0 { + for _, e := range m.CDIDevices { + l = e.Size() + n += 2 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *CreateContainerRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodSandboxId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.SandboxConfig != nil { + l = m.SandboxConfig.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m 
*CreateContainerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *StartContainerRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *StartContainerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *StopContainerRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Timeout != 0 { + n += 1 + sovApi(uint64(m.Timeout)) + } + return n +} + +func (m *StopContainerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RemoveContainerRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *RemoveContainerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ContainerStateValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != 0 { + n += 1 + sovApi(uint64(m.State)) + } + return n +} + +func (m *ContainerFilter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.State != nil { + l = m.State.Size() + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodSandboxId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.LabelSelector) > 0 { + for k, v := range m.LabelSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ListContainersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *Container) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.PodSandboxId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ImageRef) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.State != 0 { + n += 1 + sovApi(uint64(m.State)) + } + if m.CreatedAt != 0 { + n += 1 + sovApi(uint64(m.CreatedAt)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ListContainersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *ContainerStatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
len(m.ContainerId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Verbose { + n += 2 + } + return n +} + +func (m *ContainerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.State != 0 { + n += 1 + sovApi(uint64(m.State)) + } + if m.CreatedAt != 0 { + n += 1 + sovApi(uint64(m.CreatedAt)) + } + if m.StartedAt != 0 { + n += 1 + sovApi(uint64(m.StartedAt)) + } + if m.FinishedAt != 0 { + n += 1 + sovApi(uint64(m.FinishedAt)) + } + if m.ExitCode != 0 { + n += 1 + sovApi(uint64(m.ExitCode)) + } + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ImageRef) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Reason) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + l = len(m.LogPath) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Resources != nil { + l = m.Resources.Size() + n += 2 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ContainerStatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Info) > 0 { + for k, v := range m.Info { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ContainerResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Linux != nil { + l = m.Linux.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Windows != nil { + l = m.Windows.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *UpdateContainerResourcesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Linux != nil { + l = m.Linux.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Windows != nil { + l = m.Windows.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *UpdateContainerResourcesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ExecSyncRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Cmd) > 0 { + for _, s := range m.Cmd { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Timeout != 0 { + n += 1 + sovApi(uint64(m.Timeout)) + } + return n +} + +func (m *ExecSyncResponse) Size() 
(n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Stdout) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Stderr) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ExitCode != 0 { + n += 1 + sovApi(uint64(m.ExitCode)) + } + return n +} + +func (m *ExecRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Cmd) > 0 { + for _, s := range m.Cmd { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Tty { + n += 2 + } + if m.Stdin { + n += 2 + } + if m.Stdout { + n += 2 + } + if m.Stderr { + n += 2 + } + return n +} + +func (m *ExecResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Url) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *AttachRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Stdin { + n += 2 + } + if m.Tty { + n += 2 + } + if m.Stdout { + n += 2 + } + if m.Stderr { + n += 2 + } + return n +} + +func (m *AttachResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Url) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *PortForwardRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodSandboxId) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Port) > 0 { + l = 0 + for _, e := range m.Port { + l += sovApi(uint64(e)) + } + n += 1 + sovApi(uint64(l)) + l + } + return n +} + +func (m *PortForwardResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Url) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ImageFilter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *ListImagesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *Image) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.RepoTags) > 0 { + for _, s := range m.RepoTags { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.RepoDigests) > 0 { + for _, s := range m.RepoDigests { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Size_ != 0 { + n += 1 + sovApi(uint64(m.Size_)) + } + if m.Uid != nil { + l = m.Uid.Size() + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Pinned { + n += 2 + } + return n +} + +func (m *ListImagesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *ImageStatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Verbose { + n += 2 + } + return n +} + +func (m *ImageStatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovApi(uint64(l)) + } + 
if len(m.Info) > 0 { + for k, v := range m.Info { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *AuthConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Username) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Auth) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ServerAddress) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.IdentityToken) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RegistryToken) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *PullImageRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.Auth != nil { + l = m.Auth.Size() + n += 1 + l + sovApi(uint64(l)) + } + if m.SandboxConfig != nil { + l = m.SandboxConfig.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *PullImageResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ImageRef) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *RemoveImageRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *RemoveImageResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *NetworkConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodCidr) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *RuntimeConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NetworkConfig != nil { + l = m.NetworkConfig.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *UpdateRuntimeConfigRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RuntimeConfig != nil { + l = m.RuntimeConfig.Size() + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *UpdateRuntimeConfigResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RuntimeCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.Status { + n += 2 + } + l = len(m.Reason) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *RuntimeStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *StatusRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Verbose { + n += 2 + } + return n +} + +func (m *StatusResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Info) > 0 { + for k, v := range m.Info { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v))) + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ImageFsInfoRequest) Size() (n int) { + 
if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *UInt64Value) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Value != 0 {
+		n += 1 + sovApi(uint64(m.Value))
+	}
+	return n
+}
+
+func (m *FilesystemIdentifier) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Mountpoint)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *FilesystemUsage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Timestamp != 0 {
+		n += 1 + sovApi(uint64(m.Timestamp))
+	}
+	if m.FsId != nil {
+		l = m.FsId.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.UsedBytes != nil {
+		l = m.UsedBytes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.InodesUsed != nil {
+		l = m.InodesUsed.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *WindowsFilesystemUsage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Timestamp != 0 {
+		n += 1 + sovApi(uint64(m.Timestamp))
+	}
+	if m.FsId != nil {
+		l = m.FsId.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.UsedBytes != nil {
+		l = m.UsedBytes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *ImageFsInfoResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.ImageFilesystems) > 0 {
+		for _, e := range m.ImageFilesystems {
+			l = e.Size()
+			n += 1 + l + sovApi(uint64(l))
+		}
+	}
+	if len(m.ContainerFilesystems) > 0 {
+		for _, e := range m.ContainerFilesystems {
+			l = e.Size()
+			n += 1 + l + sovApi(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ContainerStatsRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ContainerId)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *ContainerStatsResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Stats != nil {
+		l = m.Stats.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *ListContainerStatsRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Filter != nil {
+		l = m.Filter.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *ContainerStatsFilter) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Id)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	l = len(m.PodSandboxId)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if len(m.LabelSelector) > 0 {
+		for k, v := range m.LabelSelector {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v)))
+			n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *ListContainerStatsResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Stats) > 0 {
+		for _, e := range m.Stats {
+			l = e.Size()
+			n += 1 + l + sovApi(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ContainerAttributes) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Id)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.Metadata != nil {
+		l = m.Metadata.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v)))
+			n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Annotations) > 0 {
+		for k, v := range m.Annotations {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + 1 + len(v) + sovApi(uint64(len(v)))
+			n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *ContainerStats) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Attributes != nil {
+		l = m.Attributes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.Cpu != nil {
+		l = m.Cpu.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.Memory != nil {
+		l = m.Memory.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.WritableLayer != nil {
+		l = m.WritableLayer.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.Swap != nil {
+		l = m.Swap.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *WindowsContainerStats) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Attributes != nil {
+		l = m.Attributes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.Cpu != nil {
+		l = m.Cpu.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.Memory != nil {
+		l = m.Memory.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.WritableLayer != nil {
+		l = m.WritableLayer.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *CpuUsage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Timestamp != 0 {
+		n += 1 + sovApi(uint64(m.Timestamp))
+	}
+	if m.UsageCoreNanoSeconds != nil {
+		l = m.UsageCoreNanoSeconds.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.UsageNanoCores != nil {
+		l = m.UsageNanoCores.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *WindowsCpuUsage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Timestamp != 0 {
+		n += 1 + sovApi(uint64(m.Timestamp))
+	}
+	if m.UsageCoreNanoSeconds != nil {
+		l = m.UsageCoreNanoSeconds.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.UsageNanoCores != nil {
+		l = m.UsageNanoCores.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *MemoryUsage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Timestamp != 0 {
+		n += 1 + sovApi(uint64(m.Timestamp))
+	}
+	if m.WorkingSetBytes != nil {
+		l = m.WorkingSetBytes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.AvailableBytes != nil {
+		l = m.AvailableBytes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.UsageBytes != nil {
+		l = m.UsageBytes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.RssBytes != nil {
+		l = m.RssBytes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.PageFaults != nil {
+		l = m.PageFaults.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.MajorPageFaults != nil {
+		l = m.MajorPageFaults.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *SwapUsage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Timestamp != 0 {
+		n += 1 + sovApi(uint64(m.Timestamp))
+	}
+	if m.SwapAvailableBytes != nil {
+		l = m.SwapAvailableBytes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.SwapUsageBytes != nil {
+		l = m.SwapUsageBytes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *WindowsMemoryUsage) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Timestamp != 0 {
+		n += 1 + sovApi(uint64(m.Timestamp))
+	}
+	if m.WorkingSetBytes != nil {
+		l = m.WorkingSetBytes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.AvailableBytes != nil {
+		l = m.AvailableBytes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.PageFaults != nil {
+		l = m.PageFaults.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.CommitMemoryBytes != nil {
+		l = m.CommitMemoryBytes.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *ReopenContainerLogRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ContainerId)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *ReopenContainerLogResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *CheckpointContainerRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ContainerId)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	l = len(m.Location)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.Timeout != 0 {
+		n += 1 + sovApi(uint64(m.Timeout))
+	}
+	return n
+}
+
+func (m *CheckpointContainerResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *GetEventsRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *ContainerEventResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ContainerId)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.ContainerEventType != 0 {
+		n += 1 + sovApi(uint64(m.ContainerEventType))
+	}
+	if m.CreatedAt != 0 {
+		n += 1 + sovApi(uint64(m.CreatedAt))
+	}
+	if m.PodSandboxStatus != nil {
+		l = m.PodSandboxStatus.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if len(m.ContainersStatuses) > 0 {
+		for _, e := range m.ContainersStatuses {
+			l = e.Size()
+			n += 1 + l + sovApi(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ListMetricDescriptorsRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *ListMetricDescriptorsResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Descriptors) > 0 {
+		for _, e := range m.Descriptors {
+			l = e.Size()
+			n += 1 + l + sovApi(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *MetricDescriptor) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	l = len(m.Help)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if len(m.LabelKeys) > 0 {
+		for _, s := range m.LabelKeys {
+			l = len(s)
+			n += 1 + l + sovApi(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ListPodSandboxMetricsRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *ListPodSandboxMetricsResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.PodMetrics) > 0 {
+		for _, e := range m.PodMetrics {
+			l = e.Size()
+			n += 1 + l + sovApi(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PodSandboxMetrics) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.PodSandboxId)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if len(m.Metrics) > 0 {
+		for _, e := range m.Metrics {
+			l = e.Size()
+			n += 1 + l + sovApi(uint64(l))
+		}
+	}
+	if len(m.ContainerMetrics) > 0 {
+		for _, e := range m.ContainerMetrics {
+			l = e.Size()
+			n += 1 + l + sovApi(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ContainerMetrics) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ContainerId)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if len(m.Metrics) > 0 {
+		for _, e := range m.Metrics {
+			l = e.Size()
+			n += 1 + l + sovApi(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *Metric) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovApi(uint64(l))
+	}
+	if m.Timestamp != 0 {
+		n += 1 + sovApi(uint64(m.Timestamp))
+	}
+	if m.MetricType != 0 {
+		n += 1 + sovApi(uint64(m.MetricType))
+	}
+	if len(m.LabelValues) > 0 {
+		for _, s := range m.LabelValues {
+			l = len(s)
+			n += 1 + l + sovApi(uint64(l))
+		}
+	}
+	if m.Value != nil {
+		l = m.Value.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *RuntimeConfigRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *RuntimeConfigResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Linux != nil {
+		l = m.Linux.Size()
+		n += 1 + l + sovApi(uint64(l))
+	}
+	return n
+}
+
+func (m *LinuxRuntimeConfiguration) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.CgroupDriver != 0 {
+		n += 1 + sovApi(uint64(m.CgroupDriver))
+	}
+	return n
+}
+
+func sovApi(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozApi(x uint64) (n int) {
+	return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *VersionRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&VersionRequest{`,
+		`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *VersionResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&VersionResponse{`,
+		`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+		`RuntimeName:` + fmt.Sprintf("%v", this.RuntimeName) + `,`,
+		`RuntimeVersion:` + fmt.Sprintf("%v", this.RuntimeVersion) + `,`,
+		`RuntimeApiVersion:` + fmt.Sprintf("%v", this.RuntimeApiVersion) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DNSConfig) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DNSConfig{`,
+		`Servers:` + fmt.Sprintf("%v", this.Servers) + `,`,
+		`Searches:` + fmt.Sprintf("%v", this.Searches) + `,`,
+		`Options:` + fmt.Sprintf("%v", this.Options) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PortMapping) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PortMapping{`,
+		`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`,
+		`ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`,
+		`HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`,
+		`HostIp:` + fmt.Sprintf("%v", this.HostIp) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Mount) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForUidMappings := "[]*IDMapping{"
+	for _, f := range this.UidMappings {
+		repeatedStringForUidMappings += strings.Replace(f.String(), "IDMapping", "IDMapping", 1) + ","
+	}
+	repeatedStringForUidMappings += "}"
+	repeatedStringForGidMappings := "[]*IDMapping{"
+	for _, f := range this.GidMappings {
+		repeatedStringForGidMappings += strings.Replace(f.String(), "IDMapping", "IDMapping", 1) + ","
+	}
+	repeatedStringForGidMappings += "}"
+	s := strings.Join([]string{`&Mount{`,
+		`ContainerPath:` + fmt.Sprintf("%v", this.ContainerPath) + `,`,
+		`HostPath:` + fmt.Sprintf("%v", this.HostPath) + `,`,
+		`Readonly:` + fmt.Sprintf("%v", this.Readonly) + `,`,
+		`SelinuxRelabel:` + fmt.Sprintf("%v", this.SelinuxRelabel) + `,`,
+		`Propagation:` + fmt.Sprintf("%v", this.Propagation) + `,`,
+		`UidMappings:` + repeatedStringForUidMappings + `,`,
+		`GidMappings:` + repeatedStringForGidMappings + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *IDMapping) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&IDMapping{`,
+		`HostId:` + fmt.Sprintf("%v", this.HostId) + `,`,
+		`ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`,
+		`Length:` +
fmt.Sprintf("%v", this.Length) + `,`, + `}`, + }, "") + return s +} +func (this *UserNamespace) String() string { + if this == nil { + return "nil" + } + repeatedStringForUids := "[]*IDMapping{" + for _, f := range this.Uids { + repeatedStringForUids += strings.Replace(f.String(), "IDMapping", "IDMapping", 1) + "," + } + repeatedStringForUids += "}" + repeatedStringForGids := "[]*IDMapping{" + for _, f := range this.Gids { + repeatedStringForGids += strings.Replace(f.String(), "IDMapping", "IDMapping", 1) + "," + } + repeatedStringForGids += "}" + s := strings.Join([]string{`&UserNamespace{`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Uids:` + repeatedStringForUids + `,`, + `Gids:` + repeatedStringForGids + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceOption) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceOption{`, + `Network:` + fmt.Sprintf("%v", this.Network) + `,`, + `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, + `Ipc:` + fmt.Sprintf("%v", this.Ipc) + `,`, + `TargetId:` + fmt.Sprintf("%v", this.TargetId) + `,`, + `UsernsOptions:` + strings.Replace(this.UsernsOptions.String(), "UserNamespace", "UserNamespace", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Int64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *LinuxSandboxSecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LinuxSandboxSecurityContext{`, + `NamespaceOptions:` + strings.Replace(this.NamespaceOptions.String(), "NamespaceOption", "NamespaceOption", 1) + `,`, + `SelinuxOptions:` + strings.Replace(this.SelinuxOptions.String(), "SELinuxOption", "SELinuxOption", 1) + `,`, + `RunAsUser:` + strings.Replace(this.RunAsUser.String(), "Int64Value", "Int64Value", 1) + `,`, + `ReadonlyRootfs:` + fmt.Sprintf("%v", this.ReadonlyRootfs) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `SeccompProfilePath:` + fmt.Sprintf("%v", this.SeccompProfilePath) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "Int64Value", "Int64Value", 1) + `,`, + `Seccomp:` + strings.Replace(this.Seccomp.String(), "SecurityProfile", "SecurityProfile", 1) + `,`, + `Apparmor:` + strings.Replace(this.Apparmor.String(), "SecurityProfile", "SecurityProfile", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityProfile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecurityProfile{`, + `ProfileType:` + fmt.Sprintf("%v", this.ProfileType) + `,`, + `LocalhostRef:` + fmt.Sprintf("%v", this.LocalhostRef) + `,`, + `}`, + }, "") + return s +} +func (this *LinuxPodSandboxConfig) String() string { + if this == nil { + return "nil" + } + keysForSysctls := make([]string, 0, len(this.Sysctls)) + for k := range this.Sysctls { + keysForSysctls = append(keysForSysctls, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSysctls) + mapStringForSysctls := "map[string]string{" + for _, k := range keysForSysctls { + mapStringForSysctls += fmt.Sprintf("%v: %v,", k, this.Sysctls[k]) + } + mapStringForSysctls += "}" + s := strings.Join([]string{`&LinuxPodSandboxConfig{`, + `CgroupParent:` + fmt.Sprintf("%v", this.CgroupParent) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "LinuxSandboxSecurityContext", 
"LinuxSandboxSecurityContext", 1) + `,`, + `Sysctls:` + mapStringForSysctls + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "LinuxContainerResources", "LinuxContainerResources", 1) + `,`, + `Resources:` + strings.Replace(this.Resources.String(), "LinuxContainerResources", "LinuxContainerResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSandboxMetadata{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Uid:` + fmt.Sprintf("%v", this.Uid) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Attempt:` + fmt.Sprintf("%v", this.Attempt) + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxConfig) String() string { + if this == nil { + return "nil" + } + repeatedStringForPortMappings := "[]*PortMapping{" + for _, f := range this.PortMappings { + repeatedStringForPortMappings += strings.Replace(f.String(), "PortMapping", "PortMapping", 1) + "," + } + repeatedStringForPortMappings += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&PodSandboxConfig{`, + `Metadata:` + strings.Replace(this.Metadata.String(), "PodSandboxMetadata", "PodSandboxMetadata", 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `LogDirectory:` + fmt.Sprintf("%v", this.LogDirectory) + `,`, + `DnsConfig:` + strings.Replace(this.DnsConfig.String(), "DNSConfig", "DNSConfig", 1) + `,`, + `PortMappings:` + repeatedStringForPortMappings + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `Linux:` + strings.Replace(this.Linux.String(), "LinuxPodSandboxConfig", "LinuxPodSandboxConfig", 1) + `,`, + `Windows:` + strings.Replace(this.Windows.String(), "WindowsPodSandboxConfig", "WindowsPodSandboxConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RunPodSandboxRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunPodSandboxRequest{`, + `Config:` + strings.Replace(this.Config.String(), "PodSandboxConfig", "PodSandboxConfig", 1) + `,`, + `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, + `}`, + }, "") + return s +} +func (this *RunPodSandboxResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunPodSandboxResponse{`, + `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `}`, + }, "") + return s +} +func (this *StopPodSandboxRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StopPodSandboxRequest{`, + `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `}`, + }, "") + return s +} +func (this *StopPodSandboxResponse) String() string { + if this == nil { + return "nil" 
+ } + s := strings.Join([]string{`&StopPodSandboxResponse{`, + `}`, + }, "") + return s +} +func (this *RemovePodSandboxRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemovePodSandboxRequest{`, + `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `}`, + }, "") + return s +} +func (this *RemovePodSandboxResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemovePodSandboxResponse{`, + `}`, + }, "") + return s +} +func (this *PodSandboxStatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSandboxStatusRequest{`, + `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `Verbose:` + fmt.Sprintf("%v", this.Verbose) + `,`, + `}`, + }, "") + return s +} +func (this *PodIP) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodIP{`, + `Ip:` + fmt.Sprintf("%v", this.Ip) + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxNetworkStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForAdditionalIps := "[]*PodIP{" + for _, f := range this.AdditionalIps { + repeatedStringForAdditionalIps += strings.Replace(f.String(), "PodIP", "PodIP", 1) + "," + } + repeatedStringForAdditionalIps += "}" + s := strings.Join([]string{`&PodSandboxNetworkStatus{`, + `Ip:` + fmt.Sprintf("%v", this.Ip) + `,`, + `AdditionalIps:` + repeatedStringForAdditionalIps + `,`, + `}`, + }, "") + return s +} +func (this *Namespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Namespace{`, + `Options:` + strings.Replace(this.Options.String(), "NamespaceOption", "NamespaceOption", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LinuxPodSandboxStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LinuxPodSandboxStatus{`, + `Namespaces:` + strings.Replace(this.Namespaces.String(), "Namespace", "Namespace", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxStatus) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&PodSandboxStatus{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `Metadata:` + strings.Replace(this.Metadata.String(), "PodSandboxMetadata", "PodSandboxMetadata", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `CreatedAt:` + fmt.Sprintf("%v", this.CreatedAt) + `,`, + `Network:` + strings.Replace(this.Network.String(), "PodSandboxNetworkStatus", "PodSandboxNetworkStatus", 1) + `,`, + `Linux:` + strings.Replace(this.Linux.String(), "LinuxPodSandboxStatus", "LinuxPodSandboxStatus", 1) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + 
mapStringForAnnotations + `,`, + `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxStatusResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainersStatuses := "[]*ContainerStatus{" + for _, f := range this.ContainersStatuses { + repeatedStringForContainersStatuses += strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1) + "," + } + repeatedStringForContainersStatuses += "}" + keysForInfo := make([]string, 0, len(this.Info)) + for k := range this.Info { + keysForInfo = append(keysForInfo, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForInfo) + mapStringForInfo := "map[string]string{" + for _, k := range keysForInfo { + mapStringForInfo += fmt.Sprintf("%v: %v,", k, this.Info[k]) + } + mapStringForInfo += "}" + s := strings.Join([]string{`&PodSandboxStatusResponse{`, + `Status:` + strings.Replace(this.Status.String(), "PodSandboxStatus", "PodSandboxStatus", 1) + `,`, + `Info:` + mapStringForInfo + `,`, + `ContainersStatuses:` + repeatedStringForContainersStatuses + `,`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxStateValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSandboxStateValue{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxFilter) String() string { + if this == nil { + return "nil" + } + keysForLabelSelector := make([]string, 0, len(this.LabelSelector)) + for k := range this.LabelSelector { + keysForLabelSelector = append(keysForLabelSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabelSelector) + mapStringForLabelSelector := "map[string]string{" + for _, k := range keysForLabelSelector { + mapStringForLabelSelector += fmt.Sprintf("%v: %v,", k, this.LabelSelector[k]) + } + mapStringForLabelSelector += "}" + s := strings.Join([]string{`&PodSandboxFilter{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `State:` + strings.Replace(this.State.String(), "PodSandboxStateValue", "PodSandboxStateValue", 1) + `,`, + `LabelSelector:` + mapStringForLabelSelector + `,`, + `}`, + }, "") + return s +} +func (this *ListPodSandboxRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListPodSandboxRequest{`, + `Filter:` + strings.Replace(this.Filter.String(), "PodSandboxFilter", "PodSandboxFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSandbox) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&PodSandbox{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `Metadata:` + strings.Replace(this.Metadata.String(), 
"PodSandboxMetadata", "PodSandboxMetadata", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `CreatedAt:` + fmt.Sprintf("%v", this.CreatedAt) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, + `}`, + }, "") + return s +} +func (this *ListPodSandboxResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]*PodSandbox{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(f.String(), "PodSandbox", "PodSandbox", 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ListPodSandboxResponse{`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxStatsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSandboxStatsRequest{`, + `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxStatsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSandboxStatsResponse{`, + `Stats:` + strings.Replace(this.Stats.String(), "PodSandboxStats", "PodSandboxStats", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxStatsFilter) String() string { + if this == nil { + return "nil" + } + keysForLabelSelector := make([]string, 0, len(this.LabelSelector)) + for k := range this.LabelSelector { + keysForLabelSelector = append(keysForLabelSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabelSelector) + mapStringForLabelSelector := "map[string]string{" + for _, k := range keysForLabelSelector { + mapStringForLabelSelector += fmt.Sprintf("%v: %v,", k, this.LabelSelector[k]) + } + mapStringForLabelSelector += "}" + s := strings.Join([]string{`&PodSandboxStatsFilter{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `LabelSelector:` + mapStringForLabelSelector + `,`, + `}`, + }, "") + return s +} +func (this *ListPodSandboxStatsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListPodSandboxStatsRequest{`, + `Filter:` + strings.Replace(this.Filter.String(), "PodSandboxStatsFilter", "PodSandboxStatsFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListPodSandboxStatsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForStats := "[]*PodSandboxStats{" + for _, f := range this.Stats { + repeatedStringForStats += strings.Replace(f.String(), "PodSandboxStats", "PodSandboxStats", 1) + "," + } + repeatedStringForStats += "}" + s := strings.Join([]string{`&ListPodSandboxStatsResponse{`, + `Stats:` + repeatedStringForStats + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxAttributes) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range 
keysForAnnotations {
+		mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+	}
+	mapStringForAnnotations += "}"
+	s := strings.Join([]string{`&PodSandboxAttributes{`,
+		`Id:` + fmt.Sprintf("%v", this.Id) + `,`,
+		`Metadata:` + strings.Replace(this.Metadata.String(), "PodSandboxMetadata", "PodSandboxMetadata", 1) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`Annotations:` + mapStringForAnnotations + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PodSandboxStats) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PodSandboxStats{`,
+		`Attributes:` + strings.Replace(this.Attributes.String(), "PodSandboxAttributes", "PodSandboxAttributes", 1) + `,`,
+		`Linux:` + strings.Replace(this.Linux.String(), "LinuxPodSandboxStats", "LinuxPodSandboxStats", 1) + `,`,
+		`Windows:` + strings.Replace(this.Windows.String(), "WindowsPodSandboxStats", "WindowsPodSandboxStats", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LinuxPodSandboxStats) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForContainers := "[]*ContainerStats{"
+	for _, f := range this.Containers {
+		repeatedStringForContainers += strings.Replace(f.String(), "ContainerStats", "ContainerStats", 1) + ","
+	}
+	repeatedStringForContainers += "}"
+	s := strings.Join([]string{`&LinuxPodSandboxStats{`,
+		`Cpu:` + strings.Replace(this.Cpu.String(), "CpuUsage", "CpuUsage", 1) + `,`,
+		`Memory:` + strings.Replace(this.Memory.String(), "MemoryUsage", "MemoryUsage", 1) + `,`,
+		`Network:` + strings.Replace(this.Network.String(), "NetworkUsage", "NetworkUsage", 1) + `,`,
+		`Process:` + strings.Replace(this.Process.String(), "ProcessUsage", "ProcessUsage", 1) + `,`,
+		`Containers:` + repeatedStringForContainers + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WindowsPodSandboxStats) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForContainers := "[]*WindowsContainerStats{"
+	for _, f := range this.Containers {
+		repeatedStringForContainers += strings.Replace(f.String(), "WindowsContainerStats", "WindowsContainerStats", 1) + ","
+	}
+	repeatedStringForContainers += "}"
+	s := strings.Join([]string{`&WindowsPodSandboxStats{`,
+		`Cpu:` + strings.Replace(this.Cpu.String(), "WindowsCpuUsage", "WindowsCpuUsage", 1) + `,`,
+		`Memory:` + strings.Replace(this.Memory.String(), "WindowsMemoryUsage", "WindowsMemoryUsage", 1) + `,`,
+		`Network:` + strings.Replace(this.Network.String(), "WindowsNetworkUsage", "WindowsNetworkUsage", 1) + `,`,
+		`Process:` + strings.Replace(this.Process.String(), "WindowsProcessUsage", "WindowsProcessUsage", 1) + `,`,
+		`Containers:` + repeatedStringForContainers + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NetworkUsage) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForInterfaces := "[]*NetworkInterfaceUsage{"
+	for _, f := range this.Interfaces {
+		repeatedStringForInterfaces += strings.Replace(f.String(), "NetworkInterfaceUsage", "NetworkInterfaceUsage", 1) + ","
+	}
+	repeatedStringForInterfaces += "}"
+	s := strings.Join([]string{`&NetworkUsage{`,
+		`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
+		`DefaultInterface:` + strings.Replace(this.DefaultInterface.String(), "NetworkInterfaceUsage", "NetworkInterfaceUsage", 1) + `,`,
+		`Interfaces:` + repeatedStringForInterfaces + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WindowsNetworkUsage) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForInterfaces := "[]*WindowsNetworkInterfaceUsage{"
+	for _, f := range this.Interfaces {
+		repeatedStringForInterfaces += strings.Replace(f.String(), "WindowsNetworkInterfaceUsage", "WindowsNetworkInterfaceUsage", 1) + ","
+	}
+	repeatedStringForInterfaces += "}"
+	s := strings.Join([]string{`&WindowsNetworkUsage{`,
+		`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
+		`DefaultInterface:` + strings.Replace(this.DefaultInterface.String(), "WindowsNetworkInterfaceUsage", "WindowsNetworkInterfaceUsage", 1) + `,`,
+		`Interfaces:` + repeatedStringForInterfaces + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NetworkInterfaceUsage) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NetworkInterfaceUsage{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`RxBytes:` + strings.Replace(this.RxBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`,
+		`RxErrors:` + strings.Replace(this.RxErrors.String(), "UInt64Value", "UInt64Value", 1) + `,`,
+		`TxBytes:` + strings.Replace(this.TxBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`,
+		`TxErrors:` + strings.Replace(this.TxErrors.String(), "UInt64Value", "UInt64Value", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WindowsNetworkInterfaceUsage) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WindowsNetworkInterfaceUsage{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`RxBytes:` + strings.Replace(this.RxBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`,
+		`RxPacketsDropped:` + strings.Replace(this.RxPacketsDropped.String(), "UInt64Value", "UInt64Value", 1) + `,`,
+		`TxBytes:` + strings.Replace(this.TxBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`,
+		`TxPacketsDropped:` + strings.Replace(this.TxPacketsDropped.String(), "UInt64Value", "UInt64Value", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ProcessUsage) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ProcessUsage{`,
+		`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
+		`ProcessCount:` + strings.Replace(this.ProcessCount.String(), "UInt64Value", "UInt64Value", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WindowsProcessUsage) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WindowsProcessUsage{`,
+		`Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
+		`ProcessCount:` + strings.Replace(this.ProcessCount.String(), "UInt64Value", "UInt64Value", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ImageSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForAnnotations := make([]string, 0, len(this.Annotations))
+	for k := range this.Annotations {
+		keysForAnnotations = append(keysForAnnotations, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations)
+	mapStringForAnnotations := "map[string]string{"
+	for _, k := range keysForAnnotations {
+		mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k])
+	}
+	mapStringForAnnotations += "}"
+	s := strings.Join([]string{`&ImageSpec{`,
+		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+		`Annotations:` + mapStringForAnnotations + `,`,
+		`UserSpecifiedImage:` + fmt.Sprintf("%v", this.UserSpecifiedImage) + `,`,
+		`RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *KeyValue) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&KeyValue{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LinuxContainerResources) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForHugepageLimits := "[]*HugepageLimit{"
+	for _, f := range this.HugepageLimits {
+		repeatedStringForHugepageLimits += strings.Replace(f.String(), "HugepageLimit", "HugepageLimit", 1) + ","
+	}
+	repeatedStringForHugepageLimits += "}"
+	keysForUnified := make([]string, 0, len(this.Unified))
+	for k := range this.Unified {
+		keysForUnified = append(keysForUnified, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForUnified)
+	mapStringForUnified := "map[string]string{"
+	for _, k := range keysForUnified {
+		mapStringForUnified += fmt.Sprintf("%v: %v,", k, this.Unified[k])
+	}
+	mapStringForUnified += "}"
+	s := strings.Join([]string{`&LinuxContainerResources{`,
+		`CpuPeriod:` + fmt.Sprintf("%v", this.CpuPeriod) + `,`,
+		`CpuQuota:` + fmt.Sprintf("%v", this.CpuQuota) + `,`,
+		`CpuShares:` + fmt.Sprintf("%v", this.CpuShares) + `,`,
+		`MemoryLimitInBytes:` + fmt.Sprintf("%v", this.MemoryLimitInBytes) + `,`,
+		`OomScoreAdj:` + fmt.Sprintf("%v", this.OomScoreAdj) + `,`,
+		`CpusetCpus:` + fmt.Sprintf("%v", this.CpusetCpus) + `,`,
+		`CpusetMems:` + fmt.Sprintf("%v", this.CpusetMems) + `,`,
+		`HugepageLimits:` + repeatedStringForHugepageLimits + `,`,
+		`Unified:` + mapStringForUnified + `,`,
+		`MemorySwapLimitInBytes:` + fmt.Sprintf("%v", this.MemorySwapLimitInBytes) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *HugepageLimit) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&HugepageLimit{`,
+		`PageSize:` + fmt.Sprintf("%v", this.PageSize) + `,`,
+		`Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SELinuxOption) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SELinuxOption{`,
+		`User:` + fmt.Sprintf("%v", this.User) + `,`,
+		`Role:` + fmt.Sprintf("%v", this.Role) + `,`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Level:` + fmt.Sprintf("%v", this.Level) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Capability) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Capability{`,
+		`AddCapabilities:` + fmt.Sprintf("%v", this.AddCapabilities) + `,`,
+		`DropCapabilities:` + fmt.Sprintf("%v", this.DropCapabilities) + `,`,
+		`AddAmbientCapabilities:` + fmt.Sprintf("%v", this.AddAmbientCapabilities) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LinuxContainerSecurityContext) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LinuxContainerSecurityContext{`,
+		`Capabilities:` + strings.Replace(this.Capabilities.String(), "Capability", "Capability", 1) + `,`,
+		`Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`,
+		`NamespaceOptions:` + strings.Replace(this.NamespaceOptions.String(), "NamespaceOption", "NamespaceOption", 1) + `,`,
+		`SelinuxOptions:` + strings.Replace(this.SelinuxOptions.String(), "SELinuxOption", "SELinuxOption", 1) + `,`,
+		`RunAsUser:` + strings.Replace(this.RunAsUser.String(), "Int64Value", "Int64Value", 1) + `,`,
+		`RunAsUsername:` + fmt.Sprintf("%v", this.RunAsUsername) + `,`,
+		`ReadonlyRootfs:` + fmt.Sprintf("%v", this.ReadonlyRootfs) + `,`,
+		`SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`,
+		`ApparmorProfile:` + fmt.Sprintf("%v", this.ApparmorProfile) + `,`,
+		`SeccompProfilePath:` + fmt.Sprintf("%v", this.SeccompProfilePath) + `,`,
+		`NoNewPrivs:` + fmt.Sprintf("%v", this.NoNewPrivs) + `,`,
+		`RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "Int64Value", "Int64Value", 1) + `,`,
+		`MaskedPaths:` + fmt.Sprintf("%v", this.MaskedPaths) + `,`,
+		`ReadonlyPaths:` + fmt.Sprintf("%v", this.ReadonlyPaths) + `,`,
+		`Seccomp:` + strings.Replace(this.Seccomp.String(), "SecurityProfile", "SecurityProfile", 1) + `,`,
+		`Apparmor:` + strings.Replace(this.Apparmor.String(), "SecurityProfile", "SecurityProfile", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LinuxContainerConfig) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LinuxContainerConfig{`,
+		`Resources:` + strings.Replace(this.Resources.String(), "LinuxContainerResources", "LinuxContainerResources", 1) + `,`,
+		`SecurityContext:` + strings.Replace(this.SecurityContext.String(), "LinuxContainerSecurityContext", "LinuxContainerSecurityContext", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WindowsNamespaceOption) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WindowsNamespaceOption{`,
+		`Network:` + fmt.Sprintf("%v", this.Network) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WindowsSandboxSecurityContext) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WindowsSandboxSecurityContext{`,
+		`RunAsUsername:` + fmt.Sprintf("%v", this.RunAsUsername) + `,`,
+		`CredentialSpec:` + fmt.Sprintf("%v", this.CredentialSpec) + `,`,
+		`HostProcess:` + fmt.Sprintf("%v", this.HostProcess) + `,`,
+		`NamespaceOptions:` + strings.Replace(this.NamespaceOptions.String(), "WindowsNamespaceOption", "WindowsNamespaceOption", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WindowsPodSandboxConfig) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WindowsPodSandboxConfig{`,
+		`SecurityContext:` + strings.Replace(this.SecurityContext.String(), "WindowsSandboxSecurityContext", "WindowsSandboxSecurityContext", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WindowsContainerSecurityContext) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WindowsContainerSecurityContext{`,
+		`RunAsUsername:` + fmt.Sprintf("%v", this.RunAsUsername) + `,`,
+		`CredentialSpec:` + fmt.Sprintf("%v", this.CredentialSpec) + `,`,
+		`HostProcess:` + fmt.Sprintf("%v", this.HostProcess) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WindowsContainerConfig) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WindowsContainerConfig{`,
+		`Resources:` + strings.Replace(this.Resources.String(), "WindowsContainerResources", "WindowsContainerResources", 1) + `,`,
+		`SecurityContext:` + strings.Replace(this.SecurityContext.String(), "WindowsContainerSecurityContext", "WindowsContainerSecurityContext", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WindowsContainerResources) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WindowsContainerResources{`,
+		`CpuShares:` + fmt.Sprintf("%v", this.CpuShares) + `,`,
+		`CpuCount:` + fmt.Sprintf("%v", this.CpuCount) + `,`,
+		`CpuMaximum:` + fmt.Sprintf("%v", this.CpuMaximum) + `,`,
+		`MemoryLimitInBytes:` + fmt.Sprintf("%v", this.MemoryLimitInBytes) + `,`,
+		`RootfsSizeInBytes:` + fmt.Sprintf("%v", this.RootfsSizeInBytes) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerMetadata) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerMetadata{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Attempt:` + fmt.Sprintf("%v",
this.Attempt) + `,`, + `}`, + }, "") + return s +} +func (this *Device) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Device{`, + `ContainerPath:` + fmt.Sprintf("%v", this.ContainerPath) + `,`, + `HostPath:` + fmt.Sprintf("%v", this.HostPath) + `,`, + `Permissions:` + fmt.Sprintf("%v", this.Permissions) + `,`, + `}`, + }, "") + return s +} +func (this *CDIDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CDIDevice{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerConfig) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnvs := "[]*KeyValue{" + for _, f := range this.Envs { + repeatedStringForEnvs += strings.Replace(f.String(), "KeyValue", "KeyValue", 1) + "," + } + repeatedStringForEnvs += "}" + repeatedStringForMounts := "[]*Mount{" + for _, f := range this.Mounts { + repeatedStringForMounts += strings.Replace(f.String(), "Mount", "Mount", 1) + "," + } + repeatedStringForMounts += "}" + repeatedStringForDevices := "[]*Device{" + for _, f := range this.Devices { + repeatedStringForDevices += strings.Replace(f.String(), "Device", "Device", 1) + "," + } + repeatedStringForDevices += "}" + repeatedStringForCDIDevices := "[]*CDIDevice{" + for _, f := range this.CDIDevices { + repeatedStringForCDIDevices += strings.Replace(f.String(), "CDIDevice", "CDIDevice", 1) + "," + } + repeatedStringForCDIDevices += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ContainerConfig{`, + `Metadata:` + strings.Replace(this.Metadata.String(), "ContainerMetadata", "ContainerMetadata", 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "ImageSpec", "ImageSpec", 1) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Envs:` + repeatedStringForEnvs + `,`, + `Mounts:` + repeatedStringForMounts + `,`, + `Devices:` + repeatedStringForDevices + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `LogPath:` + fmt.Sprintf("%v", this.LogPath) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `Tty:` + fmt.Sprintf("%v", this.Tty) + `,`, + `Linux:` + strings.Replace(this.Linux.String(), "LinuxContainerConfig", "LinuxContainerConfig", 1) + `,`, + `Windows:` + strings.Replace(this.Windows.String(), "WindowsContainerConfig", "WindowsContainerConfig", 1) + `,`, + `CDIDevices:` + repeatedStringForCDIDevices + `,`, + `}`, + }, "") + return s +} +func (this *CreateContainerRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&CreateContainerRequest{`, + `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `Config:` + strings.Replace(this.Config.String(), "ContainerConfig", "ContainerConfig", 1) + `,`, + `SandboxConfig:` + strings.Replace(this.SandboxConfig.String(), "PodSandboxConfig", "PodSandboxConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateContainerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateContainerResponse{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `}`, + }, "") + return s +} +func (this *StartContainerRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartContainerRequest{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `}`, + }, "") + return s +} +func (this *StartContainerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StartContainerResponse{`, + `}`, + }, "") + return s +} +func (this *StopContainerRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StopContainerRequest{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `Timeout:` + fmt.Sprintf("%v", this.Timeout) + `,`, + `}`, + }, "") + return s +} +func (this *StopContainerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StopContainerResponse{`, + `}`, + }, "") + return s +} +func (this *RemoveContainerRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveContainerRequest{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveContainerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveContainerResponse{`, + `}`, + }, "") + return s +} +func (this *ContainerStateValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateValue{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerFilter) String() string { + if this == nil { + return "nil" + } + keysForLabelSelector := make([]string, 0, len(this.LabelSelector)) + for k := range this.LabelSelector { + keysForLabelSelector = append(keysForLabelSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabelSelector) + mapStringForLabelSelector := "map[string]string{" + for _, k := range keysForLabelSelector { + mapStringForLabelSelector += fmt.Sprintf("%v: %v,", k, this.LabelSelector[k]) + } + mapStringForLabelSelector += "}" + s := strings.Join([]string{`&ContainerFilter{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `State:` + strings.Replace(this.State.String(), "ContainerStateValue", "ContainerStateValue", 1) + `,`, + `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `LabelSelector:` + mapStringForLabelSelector + `,`, + `}`, + }, "") + return s +} +func (this *ListContainersRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListContainersRequest{`, + `Filter:` + strings.Replace(this.Filter.String(), "ContainerFilter", "ContainerFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Container) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&Container{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `Metadata:` + strings.Replace(this.Metadata.String(), "ContainerMetadata", "ContainerMetadata", 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "ImageSpec", "ImageSpec", 1) + `,`, + `ImageRef:` + fmt.Sprintf("%v", this.ImageRef) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `CreatedAt:` + fmt.Sprintf("%v", this.CreatedAt) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `}`, + }, "") + return s +} +func (this *ListContainersResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainers := "[]*Container{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(f.String(), "Container", "Container", 1) + "," + } + repeatedStringForContainers += "}" + s := strings.Join([]string{`&ListContainersResponse{`, + `Containers:` + repeatedStringForContainers + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatusRequest{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `Verbose:` + fmt.Sprintf("%v", this.Verbose) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForMounts := "[]*Mount{" + for _, f := range this.Mounts { + repeatedStringForMounts += strings.Replace(f.String(), "Mount", "Mount", 1) + "," + } + repeatedStringForMounts += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ContainerStatus{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `Metadata:` + strings.Replace(this.Metadata.String(), "ContainerMetadata", "ContainerMetadata", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `CreatedAt:` + fmt.Sprintf("%v", this.CreatedAt) + `,`, + `StartedAt:` + fmt.Sprintf("%v", this.StartedAt) + `,`, + `FinishedAt:` + fmt.Sprintf("%v", this.FinishedAt) + `,`, + 
`ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `Image:` + strings.Replace(this.Image.String(), "ImageSpec", "ImageSpec", 1) + `,`, + `ImageRef:` + fmt.Sprintf("%v", this.ImageRef) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `Mounts:` + repeatedStringForMounts + `,`, + `LogPath:` + fmt.Sprintf("%v", this.LogPath) + `,`, + `Resources:` + strings.Replace(this.Resources.String(), "ContainerResources", "ContainerResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatusResponse) String() string { + if this == nil { + return "nil" + } + keysForInfo := make([]string, 0, len(this.Info)) + for k := range this.Info { + keysForInfo = append(keysForInfo, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForInfo) + mapStringForInfo := "map[string]string{" + for _, k := range keysForInfo { + mapStringForInfo += fmt.Sprintf("%v: %v,", k, this.Info[k]) + } + mapStringForInfo += "}" + s := strings.Join([]string{`&ContainerStatusResponse{`, + `Status:` + strings.Replace(this.Status.String(), "ContainerStatus", "ContainerStatus", 1) + `,`, + `Info:` + mapStringForInfo + `,`, + `}`, + }, "") + return s +} +func (this *ContainerResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerResources{`, + `Linux:` + strings.Replace(this.Linux.String(), "LinuxContainerResources", "LinuxContainerResources", 1) + `,`, + `Windows:` + strings.Replace(this.Windows.String(), "WindowsContainerResources", "WindowsContainerResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateContainerResourcesRequest) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&UpdateContainerResourcesRequest{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `Linux:` + strings.Replace(this.Linux.String(), "LinuxContainerResources", "LinuxContainerResources", 1) + `,`, + `Windows:` + strings.Replace(this.Windows.String(), "WindowsContainerResources", "WindowsContainerResources", 1) + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `}`, + }, "") + return s +} +func (this *UpdateContainerResourcesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateContainerResourcesResponse{`, + `}`, + }, "") + return s +} +func (this *ExecSyncRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecSyncRequest{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `Cmd:` + fmt.Sprintf("%v", this.Cmd) + `,`, + `Timeout:` + fmt.Sprintf("%v", this.Timeout) + `,`, + `}`, + }, "") + return s +} +func (this *ExecSyncResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecSyncResponse{`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `}`, + }, "") + return s +} +func (this 
*ExecRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecRequest{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `Cmd:` + fmt.Sprintf("%v", this.Cmd) + `,`, + `Tty:` + fmt.Sprintf("%v", this.Tty) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `}`, + }, "") + return s +} +func (this *ExecResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecResponse{`, + `Url:` + fmt.Sprintf("%v", this.Url) + `,`, + `}`, + }, "") + return s +} +func (this *AttachRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachRequest{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Tty:` + fmt.Sprintf("%v", this.Tty) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `}`, + }, "") + return s +} +func (this *AttachResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachResponse{`, + `Url:` + fmt.Sprintf("%v", this.Url) + `,`, + `}`, + }, "") + return s +} +func (this *PortForwardRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortForwardRequest{`, + `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *PortForwardResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortForwardResponse{`, + `Url:` + fmt.Sprintf("%v", this.Url) + `,`, + `}`, + }, "") + return s +} +func (this *ImageFilter) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageFilter{`, + `Image:` + strings.Replace(this.Image.String(), "ImageSpec", "ImageSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListImagesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListImagesRequest{`, + `Filter:` + strings.Replace(this.Filter.String(), "ImageFilter", "ImageFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Image) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Image{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `RepoTags:` + fmt.Sprintf("%v", this.RepoTags) + `,`, + `RepoDigests:` + fmt.Sprintf("%v", this.RepoDigests) + `,`, + `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, + `Uid:` + strings.Replace(this.Uid.String(), "Int64Value", "Int64Value", 1) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Spec:` + strings.Replace(this.Spec.String(), "ImageSpec", "ImageSpec", 1) + `,`, + `Pinned:` + fmt.Sprintf("%v", this.Pinned) + `,`, + `}`, + }, "") + return s +} +func (this *ListImagesResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]*Image{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(f.String(), "Image", "Image", 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&ListImagesResponse{`, + `Images:` + repeatedStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStatusRequest{`, + `Image:` + strings.Replace(this.Image.String(), 
"ImageSpec", "ImageSpec", 1) + `,`, + `Verbose:` + fmt.Sprintf("%v", this.Verbose) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStatusResponse) String() string { + if this == nil { + return "nil" + } + keysForInfo := make([]string, 0, len(this.Info)) + for k := range this.Info { + keysForInfo = append(keysForInfo, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForInfo) + mapStringForInfo := "map[string]string{" + for _, k := range keysForInfo { + mapStringForInfo += fmt.Sprintf("%v: %v,", k, this.Info[k]) + } + mapStringForInfo += "}" + s := strings.Join([]string{`&ImageStatusResponse{`, + `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, + `Info:` + mapStringForInfo + `,`, + `}`, + }, "") + return s +} +func (this *AuthConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuthConfig{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Password:` + fmt.Sprintf("%v", this.Password) + `,`, + `Auth:` + fmt.Sprintf("%v", this.Auth) + `,`, + `ServerAddress:` + fmt.Sprintf("%v", this.ServerAddress) + `,`, + `IdentityToken:` + fmt.Sprintf("%v", this.IdentityToken) + `,`, + `RegistryToken:` + fmt.Sprintf("%v", this.RegistryToken) + `,`, + `}`, + }, "") + return s +} +func (this *PullImageRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PullImageRequest{`, + `Image:` + strings.Replace(this.Image.String(), "ImageSpec", "ImageSpec", 1) + `,`, + `Auth:` + strings.Replace(this.Auth.String(), "AuthConfig", "AuthConfig", 1) + `,`, + `SandboxConfig:` + strings.Replace(this.SandboxConfig.String(), "PodSandboxConfig", "PodSandboxConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PullImageResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PullImageResponse{`, + `ImageRef:` + fmt.Sprintf("%v", this.ImageRef) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveImageRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveImageRequest{`, + `Image:` + strings.Replace(this.Image.String(), "ImageSpec", "ImageSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveImageResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveImageResponse{`, + `}`, + }, "") + return s +} +func (this *NetworkConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkConfig{`, + `PodCidr:` + fmt.Sprintf("%v", this.PodCidr) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeConfig{`, + `NetworkConfig:` + strings.Replace(this.NetworkConfig.String(), "NetworkConfig", "NetworkConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateRuntimeConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateRuntimeConfigRequest{`, + `RuntimeConfig:` + strings.Replace(this.RuntimeConfig.String(), "RuntimeConfig", "RuntimeConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateRuntimeConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateRuntimeConfigResponse{`, + `}`, + }, "") + return s +} +func (this *RuntimeCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + 
`Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]*RuntimeCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(f.String(), "RuntimeCondition", "RuntimeCondition", 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&RuntimeStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *StatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusRequest{`, + `Verbose:` + fmt.Sprintf("%v", this.Verbose) + `,`, + `}`, + }, "") + return s +} +func (this *StatusResponse) String() string { + if this == nil { + return "nil" + } + keysForInfo := make([]string, 0, len(this.Info)) + for k := range this.Info { + keysForInfo = append(keysForInfo, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForInfo) + mapStringForInfo := "map[string]string{" + for _, k := range keysForInfo { + mapStringForInfo += fmt.Sprintf("%v: %v,", k, this.Info[k]) + } + mapStringForInfo += "}" + s := strings.Join([]string{`&StatusResponse{`, + `Status:` + strings.Replace(this.Status.String(), "RuntimeStatus", "RuntimeStatus", 1) + `,`, + `Info:` + mapStringForInfo + `,`, + `}`, + }, "") + return s +} +func (this *ImageFsInfoRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageFsInfoRequest{`, + `}`, + }, "") + return s +} +func (this *UInt64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *FilesystemIdentifier) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FilesystemIdentifier{`, + `Mountpoint:` + fmt.Sprintf("%v", this.Mountpoint) + `,`, + `}`, + }, "") + return s +} +func (this *FilesystemUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FilesystemUsage{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `FsId:` + strings.Replace(this.FsId.String(), "FilesystemIdentifier", "FilesystemIdentifier", 1) + `,`, + `UsedBytes:` + strings.Replace(this.UsedBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `InodesUsed:` + strings.Replace(this.InodesUsed.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WindowsFilesystemUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WindowsFilesystemUsage{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `FsId:` + strings.Replace(this.FsId.String(), "FilesystemIdentifier", "FilesystemIdentifier", 1) + `,`, + `UsedBytes:` + strings.Replace(this.UsedBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageFsInfoResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForImageFilesystems := "[]*FilesystemUsage{" + for _, f := range this.ImageFilesystems { + repeatedStringForImageFilesystems += strings.Replace(f.String(), "FilesystemUsage", "FilesystemUsage", 1) + "," + } + repeatedStringForImageFilesystems += "}" + repeatedStringForContainerFilesystems := "[]*FilesystemUsage{" + for _, f := range 
this.ContainerFilesystems { + repeatedStringForContainerFilesystems += strings.Replace(f.String(), "FilesystemUsage", "FilesystemUsage", 1) + "," + } + repeatedStringForContainerFilesystems += "}" + s := strings.Join([]string{`&ImageFsInfoResponse{`, + `ImageFilesystems:` + repeatedStringForImageFilesystems + `,`, + `ContainerFilesystems:` + repeatedStringForContainerFilesystems + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatsRequest{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatsResponse{`, + `Stats:` + strings.Replace(this.Stats.String(), "ContainerStats", "ContainerStats", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListContainerStatsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListContainerStatsRequest{`, + `Filter:` + strings.Replace(this.Filter.String(), "ContainerStatsFilter", "ContainerStatsFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatsFilter) String() string { + if this == nil { + return "nil" + } + keysForLabelSelector := make([]string, 0, len(this.LabelSelector)) + for k := range this.LabelSelector { + keysForLabelSelector = append(keysForLabelSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabelSelector) + mapStringForLabelSelector := "map[string]string{" + for _, k := range keysForLabelSelector { + mapStringForLabelSelector += fmt.Sprintf("%v: %v,", k, this.LabelSelector[k]) + } + mapStringForLabelSelector += "}" + s := strings.Join([]string{`&ContainerStatsFilter{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `LabelSelector:` + mapStringForLabelSelector + `,`, + `}`, + }, "") + return s +} +func (this *ListContainerStatsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForStats := "[]*ContainerStats{" + for _, f := range this.Stats { + repeatedStringForStats += strings.Replace(f.String(), "ContainerStats", "ContainerStats", 1) + "," + } + repeatedStringForStats += "}" + s := strings.Join([]string{`&ListContainerStatsResponse{`, + `Stats:` + repeatedStringForStats + `,`, + `}`, + }, "") + return s +} +func (this *ContainerAttributes) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ContainerAttributes{`, + `Id:` + fmt.Sprintf("%v", this.Id) + `,`, + `Metadata:` + strings.Replace(this.Metadata.String(), "ContainerMetadata", "ContainerMetadata", 1) + 
`,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStats{`, + `Attributes:` + strings.Replace(this.Attributes.String(), "ContainerAttributes", "ContainerAttributes", 1) + `,`, + `Cpu:` + strings.Replace(this.Cpu.String(), "CpuUsage", "CpuUsage", 1) + `,`, + `Memory:` + strings.Replace(this.Memory.String(), "MemoryUsage", "MemoryUsage", 1) + `,`, + `WritableLayer:` + strings.Replace(this.WritableLayer.String(), "FilesystemUsage", "FilesystemUsage", 1) + `,`, + `Swap:` + strings.Replace(this.Swap.String(), "SwapUsage", "SwapUsage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WindowsContainerStats) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WindowsContainerStats{`, + `Attributes:` + strings.Replace(this.Attributes.String(), "ContainerAttributes", "ContainerAttributes", 1) + `,`, + `Cpu:` + strings.Replace(this.Cpu.String(), "WindowsCpuUsage", "WindowsCpuUsage", 1) + `,`, + `Memory:` + strings.Replace(this.Memory.String(), "WindowsMemoryUsage", "WindowsMemoryUsage", 1) + `,`, + `WritableLayer:` + strings.Replace(this.WritableLayer.String(), "WindowsFilesystemUsage", "WindowsFilesystemUsage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CpuUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CpuUsage{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `UsageCoreNanoSeconds:` + strings.Replace(this.UsageCoreNanoSeconds.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `UsageNanoCores:` + strings.Replace(this.UsageNanoCores.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WindowsCpuUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WindowsCpuUsage{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `UsageCoreNanoSeconds:` + strings.Replace(this.UsageCoreNanoSeconds.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `UsageNanoCores:` + strings.Replace(this.UsageNanoCores.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MemoryUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MemoryUsage{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `WorkingSetBytes:` + strings.Replace(this.WorkingSetBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `AvailableBytes:` + strings.Replace(this.AvailableBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `UsageBytes:` + strings.Replace(this.UsageBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `RssBytes:` + strings.Replace(this.RssBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `PageFaults:` + strings.Replace(this.PageFaults.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `MajorPageFaults:` + strings.Replace(this.MajorPageFaults.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SwapUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SwapUsage{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `SwapAvailableBytes:` + strings.Replace(this.SwapAvailableBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `SwapUsageBytes:` + strings.Replace(this.SwapUsageBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `}`, + }, "") + 
return s +} +func (this *WindowsMemoryUsage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WindowsMemoryUsage{`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `WorkingSetBytes:` + strings.Replace(this.WorkingSetBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `AvailableBytes:` + strings.Replace(this.AvailableBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `PageFaults:` + strings.Replace(this.PageFaults.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `CommitMemoryBytes:` + strings.Replace(this.CommitMemoryBytes.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReopenContainerLogRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReopenContainerLogRequest{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `}`, + }, "") + return s +} +func (this *ReopenContainerLogResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReopenContainerLogResponse{`, + `}`, + }, "") + return s +} +func (this *CheckpointContainerRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckpointContainerRequest{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `Location:` + fmt.Sprintf("%v", this.Location) + `,`, + `Timeout:` + fmt.Sprintf("%v", this.Timeout) + `,`, + `}`, + }, "") + return s +} +func (this *CheckpointContainerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CheckpointContainerResponse{`, + `}`, + }, "") + return s +} +func (this *GetEventsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetEventsRequest{`, + `}`, + }, "") + return s +} +func (this *ContainerEventResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainersStatuses := "[]*ContainerStatus{" + for _, f := range this.ContainersStatuses { + repeatedStringForContainersStatuses += strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1) + "," + } + repeatedStringForContainersStatuses += "}" + s := strings.Join([]string{`&ContainerEventResponse{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `ContainerEventType:` + fmt.Sprintf("%v", this.ContainerEventType) + `,`, + `CreatedAt:` + fmt.Sprintf("%v", this.CreatedAt) + `,`, + `PodSandboxStatus:` + strings.Replace(this.PodSandboxStatus.String(), "PodSandboxStatus", "PodSandboxStatus", 1) + `,`, + `ContainersStatuses:` + repeatedStringForContainersStatuses + `,`, + `}`, + }, "") + return s +} +func (this *ListMetricDescriptorsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListMetricDescriptorsRequest{`, + `}`, + }, "") + return s +} +func (this *ListMetricDescriptorsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForDescriptors := "[]*MetricDescriptor{" + for _, f := range this.Descriptors { + repeatedStringForDescriptors += strings.Replace(f.String(), "MetricDescriptor", "MetricDescriptor", 1) + "," + } + repeatedStringForDescriptors += "}" + s := strings.Join([]string{`&ListMetricDescriptorsResponse{`, + `Descriptors:` + repeatedStringForDescriptors + `,`, + `}`, + }, "") + return s +} +func (this *MetricDescriptor) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricDescriptor{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Help:` 
+ fmt.Sprintf("%v", this.Help) + `,`, + `LabelKeys:` + fmt.Sprintf("%v", this.LabelKeys) + `,`, + `}`, + }, "") + return s +} +func (this *ListPodSandboxMetricsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListPodSandboxMetricsRequest{`, + `}`, + }, "") + return s +} +func (this *ListPodSandboxMetricsResponse) String() string { + if this == nil { + return "nil" + } + repeatedStringForPodMetrics := "[]*PodSandboxMetrics{" + for _, f := range this.PodMetrics { + repeatedStringForPodMetrics += strings.Replace(f.String(), "PodSandboxMetrics", "PodSandboxMetrics", 1) + "," + } + repeatedStringForPodMetrics += "}" + s := strings.Join([]string{`&ListPodSandboxMetricsResponse{`, + `PodMetrics:` + repeatedStringForPodMetrics + `,`, + `}`, + }, "") + return s +} +func (this *PodSandboxMetrics) String() string { + if this == nil { + return "nil" + } + repeatedStringForMetrics := "[]*Metric{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(f.String(), "Metric", "Metric", 1) + "," + } + repeatedStringForMetrics += "}" + repeatedStringForContainerMetrics := "[]*ContainerMetrics{" + for _, f := range this.ContainerMetrics { + repeatedStringForContainerMetrics += strings.Replace(f.String(), "ContainerMetrics", "ContainerMetrics", 1) + "," + } + repeatedStringForContainerMetrics += "}" + s := strings.Join([]string{`&PodSandboxMetrics{`, + `PodSandboxId:` + fmt.Sprintf("%v", this.PodSandboxId) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, + `ContainerMetrics:` + repeatedStringForContainerMetrics + `,`, + `}`, + }, "") + return s +} +func (this *ContainerMetrics) String() string { + if this == nil { + return "nil" + } + repeatedStringForMetrics := "[]*Metric{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(f.String(), "Metric", "Metric", 1) + "," + } + repeatedStringForMetrics += "}" + s := strings.Join([]string{`&ContainerMetrics{`, + `ContainerId:` + fmt.Sprintf("%v", this.ContainerId) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, + `}`, + }, "") + return s +} +func (this *Metric) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Metric{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, + `MetricType:` + fmt.Sprintf("%v", this.MetricType) + `,`, + `LabelValues:` + fmt.Sprintf("%v", this.LabelValues) + `,`, + `Value:` + strings.Replace(this.Value.String(), "UInt64Value", "UInt64Value", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeConfigRequest{`, + `}`, + }, "") + return s +} +func (this *RuntimeConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeConfigResponse{`, + `Linux:` + strings.Replace(this.Linux.String(), "LinuxRuntimeConfiguration", "LinuxRuntimeConfiguration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LinuxRuntimeConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LinuxRuntimeConfiguration{`, + `CgroupDriver:` + fmt.Sprintf("%v", this.CgroupDriver) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *VersionRequest) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VersionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VersionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VersionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VersionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VersionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeApiVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeApiVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DNSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DNSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DNSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Servers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Servers = append(m.Servers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Searches", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Searches = append(m.Searches, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortMapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + m.Protocol = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Protocol |= Protocol(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIp", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIp = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Readonly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Readonly = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SelinuxRelabel", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SelinuxRelabel = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Propagation", wireType) + } + m.Propagation = 0 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Propagation |= MountPropagation(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UidMappings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UidMappings = append(m.UidMappings, &IDMapping{}) + if err := m.UidMappings[len(m.UidMappings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GidMappings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GidMappings = append(m.GidMappings, &IDMapping{}) + if err := m.GidMappings[len(m.GidMappings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IDMapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostId", wireType) + } + m.HostId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + m.ContainerId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerId |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserNamespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserNamespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserNamespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= NamespaceMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uids = append(m.Uids, &IDMapping{}) + if err := m.Uids[len(m.Uids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gids = append(m.Gids, &IDMapping{}) + if err := m.Gids[len(m.Gids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamespaceOption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamespaceOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamespaceOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + m.Network = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Network |= NamespaceMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) + } + m.Pid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Pid |= NamespaceMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ipc", wireType) + } + m.Ipc = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Ipc |= NamespaceMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsernsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsernsOptions == nil { + m.UsernsOptions = &UserNamespace{} + } + if err := m.UsernsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxSandboxSecurityContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxSandboxSecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxSandboxSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceOptions == nil { + m.NamespaceOptions = &NamespaceOption{} + } + if err := m.NamespaceOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SelinuxOptions == nil { + m.SelinuxOptions = &SELinuxOption{} + } + if err := m.SelinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RunAsUser == nil { + m.RunAsUser = &Int64Value{} + } + if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadonlyRootfs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadonlyRootfs = bool(v != 0) + case 5: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SupplementalGroups) == 0 { + m.SupplementalGroups = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeccompProfilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SeccompProfilePath = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RunAsGroup == nil { + m.RunAsGroup = &Int64Value{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Seccomp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Seccomp == nil { + m.Seccomp = &SecurityProfile{} + } + if err := m.Seccomp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Apparmor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Apparmor == nil { + m.Apparmor = &SecurityProfile{} + } + if err := m.Apparmor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityProfile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityProfile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityProfile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProfileType", wireType) + } + m.ProfileType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.ProfileType |= SecurityProfile_ProfileType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalhostRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LocalhostRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxPodSandboxConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxPodSandboxConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxPodSandboxConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupParent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CgroupParent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &LinuxSandboxSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sysctls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Sysctls == nil { + m.Sysctls = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Sysctls[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = &LinuxContainerResources{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &LinuxContainerResources{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attempt", wireType) + } + m.Attempt = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Attempt |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &PodSandboxMetadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogDirectory", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogDirectory = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DnsConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DnsConfig == nil { + m.DnsConfig = &DNSConfig{} + } + if err := m.DnsConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortMappings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PortMappings = append(m.PortMappings, &PortMapping{}) + if err := m.PortMappings[len(m.PortMappings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex 
:= iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxPodSandboxConfig{} + } + if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Windows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Windows == nil { + m.Windows = &WindowsPodSandboxConfig{} + } + if err := m.Windows.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunPodSandboxRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunPodSandboxRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunPodSandboxRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &PodSandboxConfig{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunPodSandboxResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunPodSandboxResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunPodSandboxResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopPodSandboxRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopPodSandboxRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopPodSandboxRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopPodSandboxResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopPodSandboxResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopPodSandboxResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemovePodSandboxRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemovePodSandboxRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemovePodSandboxRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemovePodSandboxResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemovePodSandboxResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemovePodSandboxResponse: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbose", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Verbose = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ip = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxNetworkStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxNetworkStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxNetworkStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ip", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ip = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalIps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalIps = append(m.AdditionalIps, &PodIP{}) + if err := m.AdditionalIps[len(m.AdditionalIps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Namespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Namespace: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &NamespaceOption{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxPodSandboxStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxPodSandboxStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxPodSandboxStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Namespaces == nil { + m.Namespaces = &Namespace{} + } + if err := m.Namespaces.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: PodSandboxStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &PodSandboxMetadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= PodSandboxState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &PodSandboxNetworkStatus{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + 
m.Linux = &LinuxPodSandboxStatus{} + } + if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + 
entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
PodSandboxStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &PodSandboxStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + 
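+
+ // Entry fields other than key (1) and value (2) are skipped rather
+ // than rejected: skipApi reports how many bytes one unknown field
+ // occupies, derived from its wire type, and the guards above stop a
+ // crafted length from moving iNdEx backwards or past this entry. The
+ // rough shape of such a skip (a sketch, not skipApi itself):
+ //
+ //	switch wireType {
+ //	case 0: // varint: consume bytes until one has the 0x80 bit clear
+ //	case 1: // fixed64: consume 8 bytes
+ //	case 2: // length-delimited: read a varint length, consume that many
+ //	case 5: // fixed32: consume 4 bytes
+ //	}
+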
iNdEx += skippy + } + } + m.Info[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainersStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainersStatuses = append(m.ContainersStatuses, &ContainerStatus{}) + if err := m.ContainersStatuses[len(m.ContainersStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxStateValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxStateValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxStateValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= PodSandboxState(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: PodSandboxFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.State == nil { + m.State = &PodSandboxStateValue{} + } + if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if 
intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.LabelSelector[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPodSandboxRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPodSandboxRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPodSandboxRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &PodSandboxFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandbox) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandbox: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandbox: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &PodSandboxMetadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= PodSandboxState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if 
postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = 
postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPodSandboxResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPodSandboxResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPodSandboxResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &PodSandbox{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxStatsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
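+
+ // (tag layout: the varint just decoded packs fieldNum<<3 | wireType,
+ // so the field number sits in the high bits and the wire type in the
+ // low three; e.g. a tag byte 0x0A means field 1, wire type 2)
+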
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxStatsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxStatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxStatsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxStatsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stats == nil { + m.Stats = &PodSandboxStats{} + } + if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxStatsFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
PodSandboxStatsFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxStatsFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
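+
+ // The decoded pair is committed only after the entry loop ends, so a
+ // duplicate key simply overwrites its predecessor: decoding entries
+ // for {"a":"1"} and then {"a":"2"} leaves LabelSelector["a"] == "2",
+ // matching proto3's last-value-wins rule for map keys.
+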
m.LabelSelector[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPodSandboxStatsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPodSandboxStatsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPodSandboxStatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &PodSandboxStatsFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPodSandboxStatsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPodSandboxStatsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPodSandboxStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &PodSandboxStats{}) + if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &PodSandboxMetadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + 
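+
+ // Varints are little-endian base-128: each byte contributes its low 7
+ // bits at the current shift, and the 0x80 bit flags a continuation.
+ // The shift >= 64 guard that follows rejects endless runs of
+ // continuation bytes in hostile input. Decoding 300 (bytes 0xAC 0x02)
+ // proceeds as:
+ //
+ //	0xAC & 0x7F = 0x2C at shift 0 -> x = 0x2C; high bit set, continue
+ //	0x02 & 0x7F = 0x02 at shift 7 -> x |= 0x100; x = 0x12C = 300, stop
+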
if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = &PodSandboxAttributes{} + } + if err := m.Attributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxPodSandboxStats{} + } + if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Windows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Windows == nil { + m.Windows = &WindowsPodSandboxStats{} + } + if err := m.Windows.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxPodSandboxStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxPodSandboxStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxPodSandboxStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cpu", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cpu == nil { + m.Cpu = &CpuUsage{} + } + if err := m.Cpu.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &MemoryUsage{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &NetworkUsage{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Process", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Process == nil { + m.Process = &ProcessUsage{} + } + if err := m.Process.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, &ContainerStats{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsPodSandboxStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsPodSandboxStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsPodSandboxStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cpu", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cpu == nil { + m.Cpu = &WindowsCpuUsage{} + } + if err := m.Cpu.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &WindowsMemoryUsage{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &WindowsNetworkUsage{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Process", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Process == nil { + m.Process = &WindowsProcessUsage{} + } + if err := m.Process.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, &WindowsContainerStats{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkUsage: illegal tag 
%d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultInterface", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DefaultInterface == nil { + m.DefaultInterface = &NetworkInterfaceUsage{} + } + if err := m.DefaultInterface.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interfaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Interfaces = append(m.Interfaces, &NetworkInterfaceUsage{}) + if err := m.Interfaces[len(m.Interfaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsNetworkUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsNetworkUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsNetworkUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultInterface", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DefaultInterface == nil { + m.DefaultInterface = &WindowsNetworkInterfaceUsage{} + } + if err := m.DefaultInterface.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interfaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Interfaces = append(m.Interfaces, &WindowsNetworkInterfaceUsage{}) + if err := m.Interfaces[len(m.Interfaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkInterfaceUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkInterfaceUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkInterfaceUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.RxBytes == nil { + m.RxBytes = &UInt64Value{} + } + if err := m.RxBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RxErrors == nil { + m.RxErrors = &UInt64Value{} + } + if err := m.RxErrors.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TxBytes == nil { + m.TxBytes = &UInt64Value{} + } + if err := m.TxBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TxErrors == nil { + m.TxErrors = &UInt64Value{} + } + if err := m.TxErrors.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsNetworkInterfaceUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsNetworkInterfaceUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsNetworkInterfaceUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RxBytes == nil { + m.RxBytes = &UInt64Value{} + } + if err := m.RxBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RxPacketsDropped", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RxPacketsDropped == nil { + m.RxPacketsDropped = &UInt64Value{} + } + if err := m.RxPacketsDropped.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TxBytes == nil { + m.TxBytes = &UInt64Value{} + } + if err := m.TxBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxPacketsDropped", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TxPacketsDropped == nil { + m.TxPacketsDropped = &UInt64Value{} + } + if err := m.TxPacketsDropped.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessCount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProcessCount == nil { + m.ProcessCount = &UInt64Value{} + } + if err := m.ProcessCount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsProcessUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsProcessUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsProcessUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessCount", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProcessCount == nil { + m.ProcessCount = &UInt64Value{} + } + if err := m.ProcessCount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserSpecifiedImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserSpecifiedImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxContainerResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxContainerResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxContainerResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuPeriod", wireType) + } + m.CpuPeriod = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuPeriod |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuQuota", wireType) + } + m.CpuQuota = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuQuota |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuShares", wireType) + } + m.CpuShares = 0 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuShares |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimitInBytes", wireType) + } + m.MemoryLimitInBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryLimitInBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OomScoreAdj", wireType) + } + m.OomScoreAdj = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OomScoreAdj |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CpusetCpus", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CpusetCpus = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CpusetMems", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CpusetMems = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HugepageLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HugepageLimits = append(m.HugepageLimits, &HugepageLimit{}) + if err := m.HugepageLimits[len(m.HugepageLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unified", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Unified == nil { + m.Unified = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Unified[mapkey] = mapvalue + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemorySwapLimitInBytes", wireType) + } + m.MemorySwapLimitInBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemorySwapLimitInBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HugepageLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HugepageLimit: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HugepageLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PageSize", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PageSize = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxOption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Capability) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capability: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capability: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AddCapabilities = append(m.AddCapabilities, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DropCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.DropCapabilities = append(m.DropCapabilities, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AddAmbientCapabilities", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.AddAmbientCapabilities = append(m.AddAmbientCapabilities, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipApi(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthApi
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LinuxContainerSecurityContext) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LinuxContainerSecurityContext: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LinuxContainerSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Capabilities == nil {
+ m.Capabilities = &Capability{}
+ }
+ if err := m.Capabilities.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Privileged = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NamespaceOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.NamespaceOptions == nil {
+ m.NamespaceOptions = &NamespaceOption{}
+ }
+ if err := m.NamespaceOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SelinuxOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.SelinuxOptions == nil {
+ m.SelinuxOptions = &SELinuxOption{}
+ }
+ if err := m.SelinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RunAsUser == nil {
+ m.RunAsUser = &Int64Value{}
+ }
+ if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RunAsUsername", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RunAsUsername = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadonlyRootfs", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ReadonlyRootfs = bool(v != 0)
+ case 8:
+ if wireType == 0 {
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SupplementalGroups = append(m.SupplementalGroups, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.SupplementalGroups) == 0 {
+ m.SupplementalGroups = make([]int64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SupplementalGroups = append(m.SupplementalGroups, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType)
+ }
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ApparmorProfile", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ApparmorProfile = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SeccompProfilePath", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SeccompProfilePath = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NoNewPrivs", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.NoNewPrivs = bool(v != 0)
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RunAsGroup == nil {
+ m.RunAsGroup = &Int64Value{}
+ }
+ if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaskedPaths", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.MaskedPaths = append(m.MaskedPaths, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ReadonlyPaths", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ReadonlyPaths = append(m.ReadonlyPaths, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 15:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Seccomp", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Seccomp == nil {
+ m.Seccomp = &SecurityProfile{}
+ }
+ if err := m.Seccomp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 16:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Apparmor", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthApi
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthApi
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Apparmor == nil {
+ m.Apparmor = &SecurityProfile{}
+ }
+ if err := m.Apparmor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipApi(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthApi
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LinuxContainerConfig) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowApi
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LinuxContainerConfig: wiretype end group for non-group")
+ }
if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxContainerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &LinuxContainerResources{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &LinuxContainerSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsNamespaceOption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsNamespaceOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsNamespaceOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + m.Network = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Network |= NamespaceMode(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsSandboxSecurityContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsSandboxSecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsSandboxSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUsername", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunAsUsername = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CredentialSpec", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CredentialSpec = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostProcess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostProcess = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceOptions == nil { + m.NamespaceOptions = &WindowsNamespaceOption{} + } + if err := m.NamespaceOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsPodSandboxConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx 
+ var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsPodSandboxConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsPodSandboxConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &WindowsSandboxSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsContainerSecurityContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsContainerSecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsContainerSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUsername", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunAsUsername = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CredentialSpec", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CredentialSpec = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostProcess", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostProcess = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsContainerConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsContainerConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsContainerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &WindowsContainerResources{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &WindowsContainerSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsContainerResources) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsContainerResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsContainerResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuShares", wireType) + } + m.CpuShares = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuShares |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuCount", wireType) + } + m.CpuCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CpuMaximum", wireType) + } + m.CpuMaximum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CpuMaximum |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryLimitInBytes", wireType) + } + m.MemoryLimitInBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryLimitInBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RootfsSizeInBytes", wireType) + } + m.RootfsSizeInBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RootfsSizeInBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerMetadata: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attempt", wireType) + } + m.Attempt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Attempt |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Device) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Device: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Device: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Permissions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Permissions = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CDIDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CDIDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CDIDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &ContainerMetadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ImageSpec{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkingDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Envs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Envs = append(m.Envs, &KeyValue{}) + if err := m.Envs[len(m.Envs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mounts = append(m.Mounts, &Mount{}) + if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Devices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Devices = append(m.Devices, &Device{}) + if err := m.Devices[len(m.Devices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > 
postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StdinOnce = bool(v != 0) + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Tty", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Tty = bool(v != 0) + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxContainerConfig{} + } + if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Windows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Windows == nil { + m.Windows = &WindowsContainerConfig{} + } + if err := m.Windows.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CDIDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CDIDevices = append(m.CDIDevices, &CDIDevice{}) + if err := m.CDIDevices[len(m.CDIDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateContainerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &ContainerConfig{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SandboxConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.SandboxConfig == nil { + m.SandboxConfig = &PodSandboxConfig{} + } + if err := m.SandboxConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateContainerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartContainerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StartContainerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StartContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StartContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StopContainerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + m.Timeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
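Every generated Unmarshal method in this vendored file repeats the same hand-rolled decode loop: read a base-128 varint, split it into a field number (tag >> 3) and a wire type (tag & 0x7), then bounds-check the payload length before slicing. The standalone sketch below is illustrative only and is not part of the vendored code; readVarint, errOverflow, errTruncated, and the sample buffer are hypothetical names chosen to mirror the generated helpers (ErrIntOverflowApi, ErrInvalidLengthApi, io.ErrUnexpectedEOF):

    package main

    import (
        "errors"
        "fmt"
    )

    var (
        errOverflow  = errors.New("varint overflows 64 bits")
        errTruncated = errors.New("unexpected end of buffer")
    )

    // readVarint decodes one base-128 varint starting at data[i],
    // returning the value and the index just past it. Each byte
    // contributes its low 7 bits; the high bit marks continuation,
    // the same b&0x7F / b < 0x80 pattern the generated code inlines.
    func readVarint(data []byte, i int) (uint64, int, error) {
        var v uint64
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return 0, i, errOverflow // mirrors ErrIntOverflowApi
            }
            if i >= len(data) {
                return 0, i, errTruncated // mirrors io.ErrUnexpectedEOF
            }
            b := data[i]
            i++
            v |= uint64(b&0x7F) << shift
            if b < 0x80 {
                return v, i, nil
            }
        }
    }

    func main() {
        // Field 1, wire type 2 (length-delimited), 5-byte payload:
        // tag = (1 << 3) | 2 = 0x0A, length = 5, payload = "hello".
        buf := []byte{0x0A, 0x05, 'h', 'e', 'l', 'l', 'o'}

        tag, i, err := readVarint(buf, 0)
        if err != nil {
            panic(err)
        }
        fieldNum, wireType := int32(tag>>3), int(tag&0x7) // same split as the generated code

        length, i, err := readVarint(buf, i)
        if err != nil {
            panic(err)
        }
        post := i + int(length)
        if post > len(buf) { // the generated "postIndex > l" check
            panic(errTruncated)
        }
        fmt.Printf("field=%d wire=%d payload=%q\n", fieldNum, wireType, buf[i:post])
    }

The packed-repeated branch (SupplementalGroups), the map-entry branches (Labels, Annotations, LabelSelector), and the unknown-field skip path (skipApi) in the surrounding generated code are all built from this same varint loop plus an extra postIndex cursor that delimits the nested payload.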
+func (m *StopContainerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StopContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StopContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveContainerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveContainerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= ContainerState(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.State == nil { + m.State = &ContainerStateValue{} + } + if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.LabelSelector[mapkey] = mapvalue 
+ iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListContainersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListContainersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListContainersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &ContainerFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Container) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Container: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &ContainerMetadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ImageSpec{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= ContainerState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListContainersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListContainersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListContainersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, &Container{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbose", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Verbose = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &ContainerMetadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= ContainerState(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + m.StartedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StartedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + } + m.FinishedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FinishedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitCode |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ImageSpec{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ImageRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return 
io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = 
entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mounts = append(m.Mounts, &Mount{}) + if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &ContainerResources{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &ContainerStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Info[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxContainerResources{} + } + if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Windows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Windows == nil { + m.Windows = &WindowsContainerResources{} + } + if err := m.Windows.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateContainerResourcesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateContainerResourcesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateContainerResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxContainerResources{} + } + if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Windows", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Windows == nil { + m.Windows = &WindowsContainerResources{} + } + if err := m.Windows.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) 
+ iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateContainerResourcesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateContainerResourcesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateContainerResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecSyncRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecSyncRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecSyncRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cmd", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cmd = append(m.Cmd, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + m.Timeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecSyncResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecSyncResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecSyncResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stdout = append(m.Stdout[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Stdout == nil { + m.Stdout = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stderr = append(m.Stderr[:0], dAtA[iNdEx:postIndex]...) + if m.Stderr == nil { + m.Stderr = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitCode |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cmd", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cmd = append(m.Cmd, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Tty", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Tty = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx 
< l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Tty", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Tty = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
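// The tag decoded by the loop above packs two values: the field number in
// the high bits and a 3-bit wire type in the low bits. A sketch of the split
// (splitTag is an illustrative helper, not generated API):
func splitTag(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}
// e.g. splitTag(0x0A) == (1, 2): field 1, length-delimited. Wire type 4
// (end-group) is rejected by the check that follows because these messages
// never use the deprecated group encoding.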
wireType == 4 { + return fmt.Errorf("proto: AttachResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortForwardRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortForwardRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortForwardRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = append(m.Port, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if 
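// PortForwardRequest.Port accepts both encodings of a repeated int32: wire
// type 0 (one varint per element) and wire type 2 (a packed, length-delimited
// blob of varints). The byte scan above counts terminator bytes (high bit
// clear) so the slice can be allocated once, at the right capacity, just
// below. Illustrative sketch (countPackedVarints is not generated API):
func countPackedVarints(blob []byte) int {
	count := 0
	for _, b := range blob {
		if b < 0x80 { // final byte of one varint
			count++
		}
	}
	return count
}
// e.g. countPackedVarints([]byte{0x08, 0xAC, 0x02}) == 2 (values 8 and 300).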
elementCount != 0 && len(m.Port) == 0 { + m.Port = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = append(m.Port, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortForwardResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortForwardResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortForwardResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ImageSpec{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListImagesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListImagesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListImagesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &ImageFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Image: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + 
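// The message-typed fields above (ImageFilter.Image, ListImagesRequest.Filter)
// share one pattern: lazily allocate the field, then recurse into exactly the
// bytes the length prefix covers. Hedged sketch with placeholder types
// (outer/inner are illustrative, not CRI types):
type inner struct{}

func (m *inner) Unmarshal(b []byte) error { return nil } // stand-in decoder

type outer struct{ Child *inner }

func (m *outer) unmarshalChild(dAtA []byte, iNdEx, postIndex int) error {
	if m.Child == nil {
		m.Child = &inner{} // allocate on first occurrence; repeats merge into it
	}
	return m.Child.Unmarshal(dAtA[iNdEx:postIndex]) // bounded sub-slice only
}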
if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RepoTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RepoTags = append(m.RepoTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RepoDigests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RepoDigests = append(m.RepoDigests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Uid == nil { + m.Uid = &Int64Value{} + } + if err := m.Uid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ImageSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Pinned", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Pinned = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListImagesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListImagesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListImagesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, &Image{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStatusRequest: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ImageSpec{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbose", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Verbose = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + 
entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Info[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuthConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuthConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuthConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auth = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IdentityToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IdentityToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistryToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RegistryToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
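// Every string field in AuthConfig above follows the same guarded slice:
// read a varint length, run three bounds checks, then convert. Compact
// sketch reusing the decodeVarint helper sketched earlier (readString is
// illustrative, not generated API; assumes: import ("errors"; "io")):
func readString(buf []byte, i int) (s string, next int, err error) {
	strLen, n, err := decodeVarint(buf[i:])
	if err != nil {
		return "", 0, err
	}
	end := i + n + int(strLen)
	if int(strLen) < 0 || end < 0 { // int overflow on absurd lengths
		return "", 0, errors.New("invalid length") // the ErrInvalidLengthApi case
	}
	if end > len(buf) {
		return "", 0, io.ErrUnexpectedEOF // declared length exceeds the input
	}
	return string(buf[i+n : end]), end, nil
}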
default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PullImageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PullImageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PullImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ImageSpec{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Auth == nil { + m.Auth = &AuthConfig{} + } + if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SandboxConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SandboxConfig == nil { + m.SandboxConfig = &PodSandboxConfig{} + } + if err := m.SandboxConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PullImageResponse) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PullImageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PullImageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageRef", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageRef = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveImageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveImageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &ImageSpec{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveImageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveImageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveImageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCidr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodCidr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NetworkConfig == nil { + m.NetworkConfig = &NetworkConfig{} + } + if err := m.NetworkConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateRuntimeConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateRuntimeConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateRuntimeConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RuntimeConfig == nil { + m.RuntimeConfig = &RuntimeConfig{} + } + if err := m.RuntimeConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateRuntimeConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateRuntimeConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateRuntimeConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { 
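// Empty messages such as this UpdateRuntimeConfigResponse still run the tag
// loop: every field is unknown, so each tag falls through to skipApi, which
// measures how many bytes the unknown field occupies ("skippy"). That is what
// keeps an old decoder forward-compatible with a newer .proto revision.
// Sketch of the guard pattern (skipUnknown is illustrative; assumes:
// import ("errors"; "io")):
func skipUnknown(buf []byte, i int, skip func([]byte) (int, error)) (int, error) {
	skippy, err := skip(buf[i:])
	if err != nil {
		return 0, err
	}
	if skippy < 0 || i+skippy < 0 { // overflow guard, as in the generated code
		return 0, errors.New("invalid length")
	}
	if i+skippy > len(buf) {
		return 0, io.ErrUnexpectedEOF
	}
	return i + skippy, nil // resume decoding after the unknown field
}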
+ return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Status = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for 
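// Two more shapes appear around this point. Bool fields (Status in
// RuntimeCondition above) have no dedicated wire type: they are varints where
// any nonzero value decodes to true, hence m.Status = bool(v != 0). Repeated
// message fields (Conditions below) append a zero value and then unmarshal
// in place into the last element, avoiding a temporary copy:
//
//	m.Conditions = append(m.Conditions, &RuntimeCondition{})
//	err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex])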
iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, &RuntimeCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbose", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Verbose = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
StatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &RuntimeStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
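// Callers get a uniform contract from every Unmarshal in this file: nil on
// success, io.ErrUnexpectedEOF on truncated input, and the package sentinels
// ErrInvalidLengthApi / ErrIntOverflowApi on malformed lengths or varints.
// Hedged usage sketch (the wire bytes would come from a CRI transport):
func decodeStatusResponse(wire []byte) (*StatusResponse, error) {
	var resp StatusResponse
	if err := resp.Unmarshal(wire); err != nil {
		return nil, err // e.g. io.ErrUnexpectedEOF for a truncated buffer
	}
	return &resp, nil
}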
m.Info[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageFsInfoRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageFsInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageFsInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FilesystemIdentifier) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FilesystemIdentifier: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FilesystemIdentifier: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Mountpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mountpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FilesystemUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FilesystemUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FilesystemUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FsId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FsId == nil { + m.FsId = &FilesystemIdentifier{} + } + if err := m.FsId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsedBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsedBytes == nil { + m.UsedBytes = &UInt64Value{} + } + if err := m.UsedBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InodesUsed", wireType) + } + var msglen 
int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InodesUsed == nil { + m.InodesUsed = &UInt64Value{} + } + if err := m.InodesUsed.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsFilesystemUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsFilesystemUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsFilesystemUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FsId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FsId == nil { + m.FsId = &FilesystemIdentifier{} + } + if err := m.FsId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsedBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsedBytes == nil { + m.UsedBytes = &UInt64Value{} + } + if err := m.UsedBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + 
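+ // bounds-check the computed skip length before advancing past the unknown field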
if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageFsInfoResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageFsInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageFsInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageFilesystems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageFilesystems = append(m.ImageFilesystems, &FilesystemUsage{}) + if err := m.ImageFilesystems[len(m.ImageFilesystems)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerFilesystems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerFilesystems = append(m.ContainerFilesystems, &FilesystemUsage{}) + if err := m.ContainerFilesystems[len(m.ContainerFilesystems)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stats == nil { + m.Stats = &ContainerStats{} + } + if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListContainerStatsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListContainerStatsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListContainerStatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &ContainerStatsFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatsFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatsFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatsFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.LabelSelector[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListContainerStatsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListContainerStatsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListContainerStatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stats = append(m.Stats, &ContainerStats{}) + if err := m.Stats[len(m.Stats)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &ContainerMetadata{} + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 
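+ // read the varint-encoded tag of the next field within this Labels map entry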
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return 
io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = &ContainerAttributes{} + } + if err := m.Attributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cpu", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cpu == nil { + m.Cpu = &CpuUsage{} + } + if err := m.Cpu.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &MemoryUsage{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WritableLayer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WritableLayer == nil { + m.WritableLayer = &FilesystemUsage{} + } + if err := m.WritableLayer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Swap == nil { + m.Swap = &SwapUsage{} + } + if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsContainerStats) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsContainerStats: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsContainerStats: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = &ContainerAttributes{} + } + if err := m.Attributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cpu", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cpu == nil { + m.Cpu = &WindowsCpuUsage{} + } + if err := m.Cpu.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &WindowsMemoryUsage{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WritableLayer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WritableLayer == nil { + m.WritableLayer = &WindowsFilesystemUsage{} + } + if err := m.WritableLayer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CpuUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CpuUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CpuUsage: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsageCoreNanoSeconds", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsageCoreNanoSeconds == nil { + m.UsageCoreNanoSeconds = &UInt64Value{} + } + if err := m.UsageCoreNanoSeconds.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsageNanoCores", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsageNanoCores == nil { + m.UsageNanoCores = &UInt64Value{} + } + if err := m.UsageNanoCores.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsCpuUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsCpuUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsCpuUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsageCoreNanoSeconds", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsageCoreNanoSeconds == nil { + m.UsageCoreNanoSeconds = &UInt64Value{} + } + if err := m.UsageCoreNanoSeconds.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsageNanoCores", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsageNanoCores == nil { + m.UsageNanoCores = &UInt64Value{} + } + if err := m.UsageNanoCores.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MemoryUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MemoryUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MemoryUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingSetBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WorkingSetBytes == nil { + m.WorkingSetBytes = &UInt64Value{} + } + if err := m.WorkingSetBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableBytes", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AvailableBytes == nil { + m.AvailableBytes = &UInt64Value{} + } + if err := m.AvailableBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UsageBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UsageBytes == nil { + m.UsageBytes = &UInt64Value{} + } + if err := m.UsageBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RssBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RssBytes == nil { + m.RssBytes = &UInt64Value{} + } + if err := m.RssBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PageFaults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PageFaults == nil { + m.PageFaults = &UInt64Value{} + } + if err := m.PageFaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MajorPageFaults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MajorPageFaults == nil { + m.MajorPageFaults = &UInt64Value{} + } + if err := m.MajorPageFaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
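+ // unknown field number: measure its encoded length with skipApi and jump over it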
skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SwapUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SwapUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SwapUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SwapAvailableBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SwapAvailableBytes == nil { + m.SwapAvailableBytes = &UInt64Value{} + } + if err := m.SwapAvailableBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SwapUsageBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SwapUsageBytes == nil { + m.SwapUsageBytes = &UInt64Value{} + } + if err := m.SwapUsageBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsMemoryUsage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsMemoryUsage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsMemoryUsage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingSetBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WorkingSetBytes == nil { + m.WorkingSetBytes = &UInt64Value{} + } + if err := m.WorkingSetBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AvailableBytes == nil { + m.AvailableBytes = &UInt64Value{} + } + if err := m.AvailableBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PageFaults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PageFaults == nil { + m.PageFaults = &UInt64Value{} + } + if err := m.PageFaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitMemoryBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CommitMemoryBytes == nil { + m.CommitMemoryBytes = &UInt64Value{} + } + if 
err := m.CommitMemoryBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReopenContainerLogRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReopenContainerLogRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReopenContainerLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReopenContainerLogResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReopenContainerLogResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReopenContainerLogResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckpointContainerRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckpointContainerRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckpointContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Location", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Location = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + m.Timeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CheckpointContainerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CheckpointContainerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CheckpointContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetEventsRequest) Unmarshal(dAtA []byte) error 
{ + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetEventsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetEventsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerEventResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerEventResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerEventResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerEventType", wireType) + } + m.ContainerEventType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerEventType |= ContainerEventType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodSandboxStatus == nil { + m.PodSandboxStatus = &PodSandboxStatus{} + } + if err := m.PodSandboxStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainersStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainersStatuses = append(m.ContainersStatuses, &ContainerStatus{}) + if err := m.ContainersStatuses[len(m.ContainersStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListMetricDescriptorsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListMetricDescriptorsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListMetricDescriptorsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListMetricDescriptorsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListMetricDescriptorsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListMetricDescriptorsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Descriptors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Descriptors = append(m.Descriptors, &MetricDescriptor{}) + if err := m.Descriptors[len(m.Descriptors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricDescriptor) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricDescriptor: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricDescriptor: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Help", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Help = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelKeys = append(m.LabelKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err 
:= skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPodSandboxMetricsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPodSandboxMetricsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPodSandboxMetricsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListPodSandboxMetricsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListPodSandboxMetricsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListPodSandboxMetricsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodMetrics = append(m.PodMetrics, &PodSandboxMetrics{}) + if err := m.PodMetrics[len(m.PodMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSandboxMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSandboxMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSandboxMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSandboxId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodSandboxId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, &Metric{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerMetrics = append(m.ContainerMetrics, &ContainerMetrics{}) + if err := m.ContainerMetrics[len(m.ContainerMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field ContainerId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, &Metric{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricType", wireType) + } + m.MetricType = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MetricType |= MetricType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelValues", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelValues = append(m.LabelValues, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &UInt64Value{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeConfigResponse: wiretype end group for non-group") + } + if fieldNum 
<= 0 { + return fmt.Errorf("proto: RuntimeConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Linux == nil { + m.Linux = &LinuxRuntimeConfiguration{} + } + if err := m.Linux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LinuxRuntimeConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LinuxRuntimeConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LinuxRuntimeConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupDriver", wireType) + } + m.CgroupDriver = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CgroupDriver |= CgroupDriver(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if 
length < 0 { + return 0, ErrInvalidLengthApi + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupApi + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto b/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto new file mode 100644 index 0000000000..bb15dc2478 --- /dev/null +++ b/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/api.proto @@ -0,0 +1,1863 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` +syntax = "proto3"; + +package runtime.v1; +option go_package = "k8s.io/cri-api/pkg/apis/runtime/v1"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option (gogoproto.goproto_getters_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_unrecognized_all) = false; + +// Runtime service defines the public APIs for remote container runtimes +service RuntimeService { + // Version returns the runtime name, runtime version, and runtime API version. + rpc Version(VersionRequest) returns (VersionResponse) {} + + // RunPodSandbox creates and starts a pod-level sandbox. Runtimes must ensure + // the sandbox is in the ready state on success. + rpc RunPodSandbox(RunPodSandboxRequest) returns (RunPodSandboxResponse) {} + // StopPodSandbox stops any running process that is part of the sandbox and + // reclaims network resources (e.g., IP addresses) allocated to the sandbox. + // If there are any running containers in the sandbox, they must be forcibly + // terminated. + // This call is idempotent, and must not return an error if all relevant + // resources have already been reclaimed. kubelet will call StopPodSandbox + // at least once before calling RemovePodSandbox. It will also attempt to + // reclaim resources eagerly, as soon as a sandbox is not needed. Hence, + // multiple StopPodSandbox calls are expected. + rpc StopPodSandbox(StopPodSandboxRequest) returns (StopPodSandboxResponse) {} + // RemovePodSandbox removes the sandbox. If there are any running containers + // in the sandbox, they must be forcibly terminated and removed. + // This call is idempotent, and must not return an error if the sandbox has + // already been removed. 
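Every Unmarshal function generated above repeats one wire-format pattern: decode a base-128 varint, split it into a field number and a wire type, then dispatch on the field number (skipApi applies the same decoding to step over unknown fields). A minimal standalone sketch of that pattern, separate from the vendored code and with illustrative names:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inner loop of the generated code: each byte
// contributes its low 7 bits, and a set high bit marks continuation.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 { // final byte of a varint has its high bit clear
			return v, n, nil
		}
	}
}

func main() {
	// 0x08 0x2A encodes the tag for field 1 with wire type 0 (varint),
	// followed by the value 42, e.g. an int64 Timestamp field set to 42.
	data := []byte{0x08, 0x2A}
	tag, n, _ := decodeVarint(data)
	fieldNum, wireType := int32(tag>>3), int(tag&0x7)
	value, _, _ := decodeVarint(data[n:])
	fmt.Println(fieldNum, wireType, value) // prints: 1 0 42
}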
+ rpc RemovePodSandbox(RemovePodSandboxRequest) returns (RemovePodSandboxResponse) {} + // PodSandboxStatus returns the status of the PodSandbox. If the PodSandbox is not + // present, returns an error. + rpc PodSandboxStatus(PodSandboxStatusRequest) returns (PodSandboxStatusResponse) {} + // ListPodSandbox returns a list of PodSandboxes. + rpc ListPodSandbox(ListPodSandboxRequest) returns (ListPodSandboxResponse) {} + + // CreateContainer creates a new container in specified PodSandbox + rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {} + // StartContainer starts the container. + rpc StartContainer(StartContainerRequest) returns (StartContainerResponse) {} + // StopContainer stops a running container with a grace period (i.e., timeout). + // This call is idempotent, and must not return an error if the container has + // already been stopped. + // The runtime must forcibly kill the container after the grace period is + // reached. + rpc StopContainer(StopContainerRequest) returns (StopContainerResponse) {} + // RemoveContainer removes the container. If the container is running, the + // container must be forcibly removed. + // This call is idempotent, and must not return an error if the container has + // already been removed. + rpc RemoveContainer(RemoveContainerRequest) returns (RemoveContainerResponse) {} + // ListContainers lists all containers by filters. + rpc ListContainers(ListContainersRequest) returns (ListContainersResponse) {} + // ContainerStatus returns status of the container. If the container is not + // present, returns an error. + rpc ContainerStatus(ContainerStatusRequest) returns (ContainerStatusResponse) {} + // UpdateContainerResources updates ContainerConfig of the container synchronously. + // If runtime fails to transactionally update the requested resources, an error is returned. + rpc UpdateContainerResources(UpdateContainerResourcesRequest) returns (UpdateContainerResourcesResponse) {} + // ReopenContainerLog asks runtime to reopen the stdout/stderr log file + // for the container. This is often called after the log file has been + // rotated. If the container is not running, container runtime can choose + // to either create a new log file and return nil, or return an error. + // Once it returns error, new container log file MUST NOT be created. + rpc ReopenContainerLog(ReopenContainerLogRequest) returns (ReopenContainerLogResponse) {} + + // ExecSync runs a command in a container synchronously. + rpc ExecSync(ExecSyncRequest) returns (ExecSyncResponse) {} + // Exec prepares a streaming endpoint to execute a command in the container. + rpc Exec(ExecRequest) returns (ExecResponse) {} + // Attach prepares a streaming endpoint to attach to a running container. + rpc Attach(AttachRequest) returns (AttachResponse) {} + // PortForward prepares a streaming endpoint to forward ports from a PodSandbox. + rpc PortForward(PortForwardRequest) returns (PortForwardResponse) {} + + // ContainerStats returns stats of the container. If the container does not + // exist, the call returns an error. + rpc ContainerStats(ContainerStatsRequest) returns (ContainerStatsResponse) {} + // ListContainerStats returns stats of all running containers. + rpc ListContainerStats(ListContainerStatsRequest) returns (ListContainerStatsResponse) {} + + // PodSandboxStats returns stats of the pod sandbox. If the pod sandbox does not + // exist, the call returns an error. 
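A client generated from this file behaves like any gRPC stub. As a usage sketch before the remaining RPC definitions below (the socket path is an assumption, containerd being one common CRI endpoint; the constructor and message types come from the generated api.pb.go in this vendor tree):

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
	// Assumed endpoint; adjust for your runtime (e.g. CRI-O uses a different socket).
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	client := runtime.NewRuntimeServiceClient(conn)
	v, err := client.Version(ctx, &runtime.VersionRequest{})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.RuntimeName, v.RuntimeVersion, v.RuntimeApiVersion)
}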
+ rpc PodSandboxStats(PodSandboxStatsRequest) returns (PodSandboxStatsResponse) {} + // ListPodSandboxStats returns stats of the pod sandboxes matching a filter. + rpc ListPodSandboxStats(ListPodSandboxStatsRequest) returns (ListPodSandboxStatsResponse) {} + + // UpdateRuntimeConfig updates the runtime configuration based on the given request. + rpc UpdateRuntimeConfig(UpdateRuntimeConfigRequest) returns (UpdateRuntimeConfigResponse) {} + + // Status returns the status of the runtime. + rpc Status(StatusRequest) returns (StatusResponse) {} + + // CheckpointContainer checkpoints a container + rpc CheckpointContainer(CheckpointContainerRequest) returns (CheckpointContainerResponse) {} + + // GetContainerEvents gets container events from the CRI runtime + rpc GetContainerEvents(GetEventsRequest) returns (stream ContainerEventResponse) {} + + // ListMetricDescriptors gets the descriptors for the metrics that will be returned in ListPodSandboxMetrics. + // This list should be static at startup: either the client and server restart together when + // adding or removing metrics descriptors, or they should not change. + // Put differently, if ListPodSandboxMetrics references a name that is not described in the initial + // ListMetricDescriptors call, then the metric will not be broadcasted. + rpc ListMetricDescriptors(ListMetricDescriptorsRequest) returns (ListMetricDescriptorsResponse) {} + + // ListPodSandboxMetrics gets pod sandbox metrics from CRI Runtime + rpc ListPodSandboxMetrics(ListPodSandboxMetricsRequest) returns (ListPodSandboxMetricsResponse) {} + + // RuntimeConfig returns configuration information of the runtime. + // A couple of notes: + // - The RuntimeConfigRequest object is not to be confused with the contents of UpdateRuntimeConfigRequest. + // The former is for having runtime tell Kubelet what to do, the latter vice versa. + // - It is the expectation of the Kubelet that these fields are static for the lifecycle of the Kubelet. + // The Kubelet will not re-request the RuntimeConfiguration after startup, and CRI implementations should + // avoid updating them without a full node reboot. + rpc RuntimeConfig(RuntimeConfigRequest) returns (RuntimeConfigResponse) {} +} + +// ImageService defines the public APIs for managing images. +service ImageService { + // ListImages lists existing images. + rpc ListImages(ListImagesRequest) returns (ListImagesResponse) {} + // ImageStatus returns the status of the image. If the image is not + // present, returns a response with ImageStatusResponse.Image set to + // nil. + rpc ImageStatus(ImageStatusRequest) returns (ImageStatusResponse) {} + // PullImage pulls an image with authentication config. + rpc PullImage(PullImageRequest) returns (PullImageResponse) {} + // RemoveImage removes the image. + // This call is idempotent, and must not return an error if the image has + // already been removed. + rpc RemoveImage(RemoveImageRequest) returns (RemoveImageResponse) {} + // ImageFSInfo returns information of the filesystem that is used to store images. + rpc ImageFsInfo(ImageFsInfoRequest) returns (ImageFsInfoResponse) {} +} + +message VersionRequest { + // Version of the kubelet runtime API. + string version = 1; +} + +message VersionResponse { + // Version of the kubelet runtime API. + string version = 1; + // Name of the container runtime. + string runtime_name = 2; + // Version of the container runtime. The string must be + // semver-compatible. + string runtime_version = 3; + // API version of the container runtime. 
The string must be + // semver-compatible. + string runtime_api_version = 4; +} + +// DNSConfig specifies the DNS servers and search domains of a sandbox. +message DNSConfig { + // List of DNS servers of the cluster. + repeated string servers = 1; + // List of DNS search domains of the cluster. + repeated string searches = 2; + // List of DNS options. See https://linux.die.net/man/5/resolv.conf + // for all available options. + repeated string options = 3; +} + +enum Protocol { + TCP = 0; + UDP = 1; + SCTP = 2; +} + +// PortMapping specifies the port mapping configurations of a sandbox. +message PortMapping { + // Protocol of the port mapping. + Protocol protocol = 1; + // Port number within the container. Default: 0 (not specified). + int32 container_port = 2; + // Port number on the host. Default: 0 (not specified). + int32 host_port = 3; + // Host IP. + string host_ip = 4; +} + +enum MountPropagation { + // No mount propagation ("rprivate" in Linux terminology). + PROPAGATION_PRIVATE = 0; + // Mounts get propagated from the host to the container ("rslave" in Linux). + PROPAGATION_HOST_TO_CONTAINER = 1; + // Mounts get propagated from the host to the container and from the + // container to the host ("rshared" in Linux). + PROPAGATION_BIDIRECTIONAL = 2; +} + +// Mount specifies a host volume to mount into a container. +message Mount { + // Path of the mount within the container. + string container_path = 1; + // Path of the mount on the host. If the hostPath doesn't exist, then runtimes + // should report error. If the hostpath is a symbolic link, runtimes should + // follow the symlink and mount the real destination to container. + string host_path = 2; + // If set, the mount is read-only. + bool readonly = 3; + // If set, the mount needs SELinux relabeling. + bool selinux_relabel = 4; + // Requested propagation mode. + MountPropagation propagation = 5; + // UidMappings specifies the runtime UID mappings for the mount. + repeated IDMapping uidMappings = 6; + // GidMappings specifies the runtime GID mappings for the mount. + repeated IDMapping gidMappings = 7; +} + +// IDMapping describes host to container ID mappings for a pod sandbox. +message IDMapping { + // HostId is the id on the host. + uint32 host_id = 1; + // ContainerId is the id in the container. + uint32 container_id = 2; + // Length is the size of the range to map. + uint32 length = 3; +} + +// A NamespaceMode describes the intended namespace configuration for each +// of the namespaces (Network, PID, IPC) in NamespaceOption. Runtimes should +// map these modes as appropriate for the technology underlying the runtime. +enum NamespaceMode { + // A POD namespace is common to all containers in a pod. + // For example, a container with a PID namespace of POD expects to view + // all of the processes in all of the containers in the pod. + POD = 0; + // A CONTAINER namespace is restricted to a single container. + // For example, a container with a PID namespace of CONTAINER expects to + // view only the processes in that container. + CONTAINER = 1; + // A NODE namespace is the namespace of the Kubernetes node. + // For example, a container with a PID namespace of NODE expects to view + // all of the processes on the host running the kubelet. + NODE = 2; + // TARGET targets the namespace of another container. When this is specified, + // a target_id must be specified in NamespaceOption and refer to a container + // previously created with NamespaceMode CONTAINER. 
This container's namespace + will be made to match that of container target_id. + // For example, a container with a PID namespace of TARGET expects to view + // all of the processes that container target_id can view. + TARGET = 3; +} + +// UserNamespace describes the intended user namespace configuration for a pod sandbox. +message UserNamespace { + // Mode is the NamespaceMode for this UserNamespace. + // Note: NamespaceMode for UserNamespace currently supports only POD and NODE, not CONTAINER or TARGET. + NamespaceMode mode = 1; + + // Uids specifies the UID mappings for the user namespace. + repeated IDMapping uids = 2; + + // Gids specifies the GID mappings for the user namespace. + repeated IDMapping gids = 3; +} + +// NamespaceOption provides options for Linux namespaces. +message NamespaceOption { + // Network namespace for this container/sandbox. + // Note: There is currently no way to set CONTAINER scoped network in the Kubernetes API. + // Namespaces currently set by the kubelet: POD, NODE + NamespaceMode network = 1; + // PID namespace for this container/sandbox. + // Note: The CRI default is POD, but the v1.PodSpec default is CONTAINER. + // The kubelet's runtime manager will set this to CONTAINER explicitly for v1 pods. + // Namespaces currently set by the kubelet: POD, CONTAINER, NODE, TARGET + NamespaceMode pid = 2; + // IPC namespace for this container/sandbox. + // Note: There is currently no way to set CONTAINER scoped IPC in the Kubernetes API. + // Namespaces currently set by the kubelet: POD, NODE + NamespaceMode ipc = 3; + // Target Container ID for NamespaceMode of TARGET. This container must have been + // previously created in the same pod. It is not possible to specify different targets + // for each namespace. + string target_id = 4; + // UsernsOptions for this pod sandbox. + // The Kubelet picks the user namespace configuration to use for the pod sandbox. The mappings + // are specified as part of the UserNamespace struct. If the struct is nil, then the POD mode + // must be assumed. This is done for backward compatibility with older Kubelet versions that + // do not set a user namespace. + UserNamespace userns_options = 5; +} + +// Int64Value is the wrapper of int64. +message Int64Value { + // The value. + int64 value = 1; +} + +// LinuxSandboxSecurityContext holds linux security configuration that will be +// applied to a sandbox. Note that: +// 1) It does not apply to containers in the pods. +// 2) It may not be applicable to a PodSandbox which does not contain any running +// process. +message LinuxSandboxSecurityContext { + // Configurations for the sandbox's namespaces. + // This will be used only if the PodSandbox uses namespace for isolation. + NamespaceOption namespace_options = 1; + // Optional SELinux context to be applied. + SELinuxOption selinux_options = 2; + // UID to run sandbox processes as, when applicable. + Int64Value run_as_user = 3; + // GID to run sandbox processes as, when applicable. run_as_group should only + // be specified when run_as_user is specified; otherwise, the runtime MUST error. + Int64Value run_as_group = 8; + // If set, the root filesystem of the sandbox is read-only. + bool readonly_rootfs = 4; + // List of groups applied to the first process run in the sandbox, in + // addition to the sandbox's primary GID, and group memberships defined + // in the container image for the sandbox's primary UID of the container process. + // If the list is empty, no additional groups are added to any container.
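These namespace and security-context messages correspond to generated Go structs. A brief construction sketch, where the UID/GID mapping range and the seccomp profile choice are illustrative placeholders rather than API defaults:

package main

import (
	"fmt"

	runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
	// Pod-scoped user namespace: container ID 0 maps to host ID 100000,
	// with a 65536-wide range (placeholder values).
	idMap := []*runtime.IDMapping{{HostId: 100000, ContainerId: 0, Length: 65536}}
	sc := &runtime.LinuxSandboxSecurityContext{
		NamespaceOptions: &runtime.NamespaceOption{
			Network: runtime.NamespaceMode_POD,
			Pid:     runtime.NamespaceMode_CONTAINER,
			Ipc:     runtime.NamespaceMode_POD,
			UsernsOptions: &runtime.UserNamespace{
				Mode: runtime.NamespaceMode_POD,
				Uids: idMap,
				Gids: idMap,
			},
		},
		// Use the runtime's default seccomp profile for the sandbox.
		Seccomp: &runtime.SecurityProfile{ProfileType: runtime.SecurityProfile_RuntimeDefault},
	}
	fmt.Println(sc)
}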
+ // Note that group memberships defined in the container image for the sandbox's primary UID + // of the container process are still effective, even if they are not included in this list. + repeated int64 supplemental_groups = 5; + // Indicates whether the sandbox will be asked to run a privileged + // container. If a privileged container is to be executed within it, this + // MUST be true. + // This allows a sandbox to take additional security precautions if no + // privileged containers are expected to be run. + bool privileged = 6; + // Seccomp profile for the sandbox. + SecurityProfile seccomp = 9; + // AppArmor profile for the sandbox. + SecurityProfile apparmor = 10; + // Seccomp profile for the sandbox, candidate values are: + // * runtime/default: the default profile for the container runtime + // * unconfined: unconfined profile, i.e., no seccomp sandboxing + // * localhost/<full-path-to-profile>: the profile installed on the node. + // <full-path-to-profile> is the full path of the profile. + // Default: "", which is identical with unconfined. + string seccomp_profile_path = 7 [deprecated=true]; +} + +// A security profile which can be used for sandboxes and containers. +message SecurityProfile { + // Available profile types. + enum ProfileType { + // The container runtime default profile should be used. + RuntimeDefault = 0; + // Disable the feature for the sandbox or the container. + Unconfined = 1; + // A pre-defined profile on the node should be used. + Localhost = 2; + } + // Indicator which `ProfileType` should be applied. + ProfileType profile_type = 1; + // Indicates that a pre-defined profile on the node should be used. + // Must only be set if `ProfileType` is `Localhost`. + // For seccomp, it must be an absolute path to the seccomp profile. + // For AppArmor, this field is the AppArmor `<profile name>` + string localhost_ref = 2; +} + +// LinuxPodSandboxConfig holds platform-specific configurations for Linux +// host platforms and Linux-based containers. +message LinuxPodSandboxConfig { + // Parent cgroup of the PodSandbox. + // The cgroupfs style syntax will be used, but the container runtime can + // convert it to systemd semantics if needed. + string cgroup_parent = 1; + // LinuxSandboxSecurityContext holds sandbox security attributes. + LinuxSandboxSecurityContext security_context = 2; + // Sysctls holds linux sysctls config for the sandbox. + map<string, string> sysctls = 3; + // Optional overhead represents the overheads associated with this sandbox + LinuxContainerResources overhead = 4; + // Optional resources represents the sum of container resources for this sandbox + LinuxContainerResources resources = 5; +} + +// PodSandboxMetadata holds all necessary information for building the sandbox name. +// The container runtime is encouraged to expose the metadata associated with the +// PodSandbox in its user interface for better user experience. For example, +// the runtime can construct a unique PodSandboxName based on the metadata. +message PodSandboxMetadata { + // Pod name of the sandbox. Same as the pod name in the Pod ObjectMeta. + string name = 1; + // Pod UID of the sandbox. Same as the pod UID in the Pod ObjectMeta. + string uid = 2; + // Pod namespace of the sandbox. Same as the pod namespace in the Pod ObjectMeta. + string namespace = 3; + // Attempt number of creating the sandbox. Default: 0. + uint32 attempt = 4; +} + +// PodSandboxConfig holds all the required and optional fields for creating a +// sandbox. +message PodSandboxConfig { + // Metadata of the sandbox.
This information will uniquely identify the + // sandbox, and the runtime should leverage this to ensure correct + // operation. The runtime may also use this information to improve UX, such + // as by constructing a readable name. + PodSandboxMetadata metadata = 1; + // Hostname of the sandbox. Hostname could only be empty when the pod + // network namespace is NODE. + string hostname = 2; + // Path to the directory on the host in which container log files are + // stored. + // By default the log of a container going into the LogDirectory will be + // hooked up to STDOUT and STDERR. However, the LogDirectory may contain + // binary log files with structured logging data from the individual + // containers. For example, the files might be newline separated JSON + // structured logs, systemd-journald journal files, gRPC trace files, etc. + // E.g., + // PodSandboxConfig.LogDirectory = `/var/log/pods/<NAMESPACE>_<NAME>_<UID>/` + // ContainerConfig.LogPath = `containerName/Instance#.log` + string log_directory = 3; + // DNS config for the sandbox. + DNSConfig dns_config = 4; + // Port mappings for the sandbox. + repeated PortMapping port_mappings = 5; + // Key-value pairs that may be used to scope and select individual resources. + map<string, string> labels = 6; + // Unstructured key-value map that may be set by the kubelet to store and + // retrieve arbitrary metadata. This will include any annotations set on a + // pod through the Kubernetes API. + // + // Annotations MUST NOT be altered by the runtime; the annotations stored + // here MUST be returned in the PodSandboxStatus associated with the pod + // this PodSandboxConfig creates. + // + // In general, in order to preserve a well-defined interface between the + // kubelet and the container runtime, annotations SHOULD NOT influence + // runtime behaviour. + // + // Annotations can also be useful for runtime authors to experiment with + // new features that are opaque to the Kubernetes APIs (both user-facing + // and the CRI). Whenever possible, however, runtime authors SHOULD + // consider proposing new typed fields for any new features instead. + map<string, string> annotations = 7; + // Optional configurations specific to Linux hosts. + LinuxPodSandboxConfig linux = 8; + // Optional configurations specific to Windows hosts. + WindowsPodSandboxConfig windows = 9; +} + +message RunPodSandboxRequest { + // Configuration for creating a PodSandbox. + PodSandboxConfig config = 1; + // Named runtime configuration to use for this PodSandbox. + // If the runtime handler is unknown, this request should be rejected. An + // empty string should select the default handler, equivalent to the + // behavior before this feature was added. + // See https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class + string runtime_handler = 2; +} + +message RunPodSandboxResponse { + // ID of the PodSandbox to run. + string pod_sandbox_id = 1; +} + +message StopPodSandboxRequest { + // ID of the PodSandbox to stop. + string pod_sandbox_id = 1; +} + +message StopPodSandboxResponse {} + +message RemovePodSandboxRequest { + // ID of the PodSandbox to remove. + string pod_sandbox_id = 1; +} + +message RemovePodSandboxResponse {} + +message PodSandboxStatusRequest { + // ID of the PodSandbox for which to retrieve status. + string pod_sandbox_id = 1; + // Verbose indicates whether to return extra information about the pod sandbox.
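Pulling the sandbox messages together, a RunPodSandbox call built from the generated types might look like the following sketch; every literal is a placeholder, and the client is assumed to be the RuntimeServiceClient from the earlier example:

package crisketch // illustrative package name

import (
	"context"

	runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// runSandbox creates and starts a pod sandbox from placeholder values.
func runSandbox(ctx context.Context, client runtime.RuntimeServiceClient) (string, error) {
	cfg := &runtime.PodSandboxConfig{
		Metadata: &runtime.PodSandboxMetadata{
			Name:      "nginx",         // placeholder pod name
			Uid:       "0000-demo-uid", // placeholder pod UID
			Namespace: "default",
			Attempt:   0,
		},
		LogDirectory: "/var/log/pods/default_nginx_0000-demo-uid/",
		Labels:       map[string]string{"app": "nginx"},
		Annotations:  map[string]string{},
		Linux:        &runtime.LinuxPodSandboxConfig{CgroupParent: "/kubepods.slice"},
	}
	resp, err := client.RunPodSandbox(ctx, &runtime.RunPodSandboxRequest{
		Config:         cfg,
		RuntimeHandler: "", // empty string selects the default handler
	})
	if err != nil {
		return "", err
	}
	return resp.PodSandboxId, nil
}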
+
+message PodSandboxStatusRequest {
+    // ID of the PodSandbox for which to retrieve status.
+    string pod_sandbox_id = 1;
+    // Verbose indicates whether to return extra information about the pod sandbox.
+    bool verbose = 2;
+}
+
+// PodIP represents an IP address of a Pod.
+message PodIP {
+    // An ip is a string representation of an IPv4 or IPv6 address.
+    string ip = 1;
+}
+
+// PodSandboxNetworkStatus is the status of the network for a PodSandbox.
+// Currently ignored for pods sharing the host networking namespace.
+message PodSandboxNetworkStatus {
+    // IP address of the PodSandbox.
+    string ip = 1;
+    // List of additional IPs (not inclusive of PodSandboxNetworkStatus.Ip) of the PodSandboxNetworkStatus.
+    repeated PodIP additional_ips = 2;
+}
+
+// Namespace contains paths to the namespaces.
+message Namespace {
+    // Namespace options for Linux namespaces.
+    NamespaceOption options = 2;
+}
+
+// LinuxPodSandboxStatus contains status specific to Linux sandboxes.
+message LinuxPodSandboxStatus {
+    // Paths to the sandbox's namespaces.
+    Namespace namespaces = 1;
+}
+
+enum PodSandboxState {
+    SANDBOX_READY = 0;
+    SANDBOX_NOTREADY = 1;
+}
+
+// PodSandboxStatus contains the status of the PodSandbox.
+message PodSandboxStatus {
+    // ID of the sandbox.
+    string id = 1;
+    // Metadata of the sandbox.
+    PodSandboxMetadata metadata = 2;
+    // State of the sandbox.
+    PodSandboxState state = 3;
+    // Creation timestamp of the sandbox in nanoseconds. Must be > 0.
+    int64 created_at = 4;
+    // Network contains network status if network is handled by the runtime.
+    PodSandboxNetworkStatus network = 5;
+    // Linux-specific status of the pod sandbox.
+    LinuxPodSandboxStatus linux = 6;
+    // Labels are key-value pairs that may be used to scope and select individual resources.
+    map<string, string> labels = 7;
+    // Unstructured key-value map holding arbitrary metadata.
+    // Annotations MUST NOT be altered by the runtime; the value of this field
+    // MUST be identical to that of the corresponding PodSandboxConfig used to
+    // instantiate the pod sandbox this status represents.
+    map<string, string> annotations = 8;
+    // Runtime configuration used for this PodSandbox.
+    string runtime_handler = 9;
+}
+
+message PodSandboxStatusResponse {
+    // Status of the PodSandbox.
+    PodSandboxStatus status = 1;
+    // Info is extra information of the PodSandbox. The key could be an arbitrary string, and
+    // the value should be in JSON format. The information could include anything useful for
+    // debugging, e.g. the network namespace for a Linux-container-based container runtime.
+    // It should only be returned non-empty when Verbose is true.
+    map<string, string> info = 2;
+    // Container statuses.
+    repeated ContainerStatus containers_statuses = 3;
+    // Timestamp at which container and pod statuses were recorded.
+    int64 timestamp = 4;
+}
+
+// PodSandboxStateValue is the wrapper of PodSandboxState.
+message PodSandboxStateValue {
+    // State of the sandbox.
+    PodSandboxState state = 1;
+}
+
+// PodSandboxFilter is used to filter a list of PodSandboxes.
+// All those fields are combined with 'AND'.
+message PodSandboxFilter {
+    // ID of the sandbox.
+    string id = 1;
+    // State of the sandbox.
+    PodSandboxStateValue state = 2;
+    // LabelSelector to select matches.
+    // Only api.MatchLabels is supported for now and the requirements
+    // are ANDed. MatchExpressions is not supported yet.
+    map<string, string> label_selector = 3;
+}
+
+message ListPodSandboxRequest {
+    // PodSandboxFilter to filter a list of PodSandboxes.
+    PodSandboxFilter filter = 1;
+}
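A brief sketch of building a filter for ListPodSandbox from the messages above; the filter fields are ANDed, so this selects only ready sandboxes carrying the given label (the label value is illustrative):

package main

import (
    "fmt"

    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
    // State and label selector are combined with AND, per the comments above.
    filter := &runtimeapi.PodSandboxFilter{
        State:         &runtimeapi.PodSandboxStateValue{State: runtimeapi.PodSandboxState_SANDBOX_READY},
        LabelSelector: map[string]string{"app": "web"},
    }
    req := &runtimeapi.ListPodSandboxRequest{Filter: filter}
    fmt.Println(req)
}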
+
+// PodSandbox contains minimal information about a sandbox.
+message PodSandbox {
+    // ID of the PodSandbox.
+    string id = 1;
+    // Metadata of the PodSandbox.
+    PodSandboxMetadata metadata = 2;
+    // State of the PodSandbox.
+    PodSandboxState state = 3;
+    // Creation timestamp of the PodSandbox in nanoseconds. Must be > 0.
+    int64 created_at = 4;
+    // Labels of the PodSandbox.
+    map<string, string> labels = 5;
+    // Unstructured key-value map holding arbitrary metadata.
+    // Annotations MUST NOT be altered by the runtime; the value of this field
+    // MUST be identical to that of the corresponding PodSandboxConfig used to
+    // instantiate this PodSandbox.
+    map<string, string> annotations = 6;
+    // Runtime configuration used for this PodSandbox.
+    string runtime_handler = 7;
+}
+
+message ListPodSandboxResponse {
+    // List of PodSandboxes.
+    repeated PodSandbox items = 1;
+}
+
+message PodSandboxStatsRequest {
+    // ID of the pod sandbox for which to retrieve stats.
+    string pod_sandbox_id = 1;
+}
+
+message PodSandboxStatsResponse {
+    PodSandboxStats stats = 1;
+}
+
+// PodSandboxStatsFilter is used to filter the list of pod sandboxes to retrieve stats for.
+// All those fields are combined with 'AND'.
+message PodSandboxStatsFilter {
+    // ID of the pod sandbox.
+    string id = 1;
+    // LabelSelector to select matches.
+    // Only api.MatchLabels is supported for now and the requirements
+    // are ANDed. MatchExpressions is not supported yet.
+    map<string, string> label_selector = 2;
+}
+
+message ListPodSandboxStatsRequest {
+    // Filter for the list request.
+    PodSandboxStatsFilter filter = 1;
+}
+
+message ListPodSandboxStatsResponse {
+    // Stats of the pod sandbox.
+    repeated PodSandboxStats stats = 1;
+}
+
+// PodSandboxAttributes provides basic information of the pod sandbox.
+message PodSandboxAttributes {
+    // ID of the pod sandbox.
+    string id = 1;
+    // Metadata of the pod sandbox.
+    PodSandboxMetadata metadata = 2;
+    // Key-value pairs that may be used to scope and select individual resources.
+    map<string, string> labels = 3;
+    // Unstructured key-value map holding arbitrary metadata.
+    // Annotations MUST NOT be altered by the runtime; the value of this field
+    // MUST be identical to that of the corresponding PodSandboxStatus used to
+    // instantiate the PodSandbox this status represents.
+    map<string, string> annotations = 4;
+}
+
+// PodSandboxStats provides the resource usage statistics for a pod.
+// The linux or windows field will be populated depending on the platform.
+message PodSandboxStats {
+    // Information of the pod.
+    PodSandboxAttributes attributes = 1;
+    // Stats from linux.
+    LinuxPodSandboxStats linux = 2;
+    // Stats from windows.
+    WindowsPodSandboxStats windows = 3;
+}
+
+// LinuxPodSandboxStats provides the resource usage statistics for a pod sandbox on Linux.
+message LinuxPodSandboxStats {
+    // CPU usage gathered for the pod sandbox.
+    CpuUsage cpu = 1;
+    // Memory usage gathered for the pod sandbox.
+    MemoryUsage memory = 2;
+    // Network usage gathered for the pod sandbox.
+    NetworkUsage network = 3;
+    // Stats pertaining to processes in the pod sandbox.
+    ProcessUsage process = 4;
+    // Stats of containers in the measured pod sandbox.
+    repeated ContainerStats containers = 5;
+}
+
+// WindowsPodSandboxStats provides the resource usage statistics for a pod sandbox on Windows.
+message WindowsPodSandboxStats {
+    // CPU usage gathered for the pod sandbox.
+    WindowsCpuUsage cpu = 1;
+    // Memory usage gathered for the pod sandbox.
+    WindowsMemoryUsage memory = 2;
+    // Network usage gathered for the pod sandbox.
+    WindowsNetworkUsage network = 3;
+    // Stats pertaining to processes in the pod sandbox.
+    WindowsProcessUsage process = 4;
+    // Stats of containers in the measured pod sandbox.
+    repeated WindowsContainerStats containers = 5;
+}
+
+// NetworkUsage contains data about network resources.
+message NetworkUsage {
+    // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+    int64 timestamp = 1;
+    // Stats for the default network interface.
+    NetworkInterfaceUsage default_interface = 2;
+    // Stats for all found network interfaces, excluding the default.
+    repeated NetworkInterfaceUsage interfaces = 3;
+}
+
+// WindowsNetworkUsage contains data about network resources specific to Windows.
+message WindowsNetworkUsage {
+    // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+    int64 timestamp = 1;
+    // Stats for the default network interface.
+    WindowsNetworkInterfaceUsage default_interface = 2;
+    // Stats for all found network interfaces, excluding the default.
+    repeated WindowsNetworkInterfaceUsage interfaces = 3;
+}
+
+// NetworkInterfaceUsage contains resource value data about a network interface.
+message NetworkInterfaceUsage {
+    // The name of the network interface.
+    string name = 1;
+    // Cumulative count of bytes received.
+    UInt64Value rx_bytes = 2;
+    // Cumulative count of receive errors encountered.
+    UInt64Value rx_errors = 3;
+    // Cumulative count of bytes transmitted.
+    UInt64Value tx_bytes = 4;
+    // Cumulative count of transmit errors encountered.
+    UInt64Value tx_errors = 5;
+}
+
+// WindowsNetworkInterfaceUsage contains resource value data about a network interface specific for Windows.
+message WindowsNetworkInterfaceUsage {
+    // The name of the network interface.
+    string name = 1;
+    // Cumulative count of bytes received.
+    UInt64Value rx_bytes = 2;
+    // Cumulative count of receive packets dropped.
+    UInt64Value rx_packets_dropped = 3;
+    // Cumulative count of bytes transmitted.
+    UInt64Value tx_bytes = 4;
+    // Cumulative count of transmit packets dropped.
+    UInt64Value tx_packets_dropped = 5;
+}
+
+// ProcessUsage are stats pertaining to processes.
+message ProcessUsage {
+    // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+    int64 timestamp = 1;
+    // Number of processes.
+    UInt64Value process_count = 2;
+}
+
+// WindowsProcessUsage are stats pertaining to processes specific to Windows.
+message WindowsProcessUsage {
+    // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+    int64 timestamp = 1;
+    // Number of processes.
+    UInt64Value process_count = 2;
+}
+
+// ImageSpec is an internal representation of an image.
+message ImageSpec {
+    // Container's Image field (e.g. imageID or imageDigest).
+    string image = 1;
+    // Unstructured key-value map holding arbitrary metadata.
+    // ImageSpec Annotations can be used to help the runtime target specific
+    // images in multi-arch images.
+    map<string, string> annotations = 2;
+    // The container image reference specified by the user (e.g. image[:tag] or digest).
+    // Only set if available within the RPC context.
+    string user_specified_image = 18;
+    // Runtime handler to use for pulling the image.
+    // If the runtime handler is unknown, the request should be rejected.
+    // An empty string would select the default runtime handler.
+    string runtime_handler = 19;
+}
+
+message KeyValue {
+    string key = 1;
+    string value = 2;
+}
+
+// LinuxContainerResources specifies Linux specific configuration for
+// resources.
+message LinuxContainerResources {
+    // CPU CFS (Completely Fair Scheduler) period. Default: 0 (not specified).
+    int64 cpu_period = 1;
+    // CPU CFS (Completely Fair Scheduler) quota. Default: 0 (not specified).
+    int64 cpu_quota = 2;
+    // CPU shares (relative weight vs. other containers). Default: 0 (not specified).
+    int64 cpu_shares = 3;
+    // Memory limit in bytes. Default: 0 (not specified).
+    int64 memory_limit_in_bytes = 4;
+    // OOMScoreAdj adjusts the oom-killer score. Default: 0 (not specified).
+    int64 oom_score_adj = 5;
+    // CpusetCpus constrains the allowed set of logical CPUs. Default: "" (not specified).
+    string cpuset_cpus = 6;
+    // CpusetMems constrains the allowed set of memory nodes. Default: "" (not specified).
+    string cpuset_mems = 7;
+    // List of HugepageLimits to limit the HugeTLB usage of container per page size. Default: nil (not specified).
+    repeated HugepageLimit hugepage_limits = 8;
+    // Unified resources for cgroup v2. Default: nil (not specified).
+    // Each key/value in the map refers to a cgroup v2 parameter,
+    // e.g. "memory.max": "6937202688" or "io.weight": "default 100".
+    map<string, string> unified = 9;
+    // Memory swap limit in bytes. Default: 0 (not specified).
+    int64 memory_swap_limit_in_bytes = 10;
+}
+
+// HugepageLimit corresponds to the file `hugetlb.<hugepagesize>.limit_in_bytes` in the container-level cgroup.
+// For example, `PageSize=1GB`, `Limit=1073741824` means setting `1073741824` bytes to hugetlb.1GB.limit_in_bytes.
+message HugepageLimit {
+    // The value of PageSize has the format <size><unit-prefix>B (2MB, 1GB),
+    // and must match the <hugepagesize> of the corresponding control file found in `hugetlb.<hugepagesize>.limit_in_bytes`.
+    // The values of <unit-prefix> are intended to be parsed using base 1024 ("1KB" = 1024, "1MB" = 1048576, etc).
+    string page_size = 1;
+    // Limit in bytes of hugepagesize HugeTLB usage.
+    uint64 limit = 2;
+}
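A short sketch of the CFS arithmetic implied by cpu_period/cpu_quota, plus a HugepageLimit matching the example in the comment above; all concrete values are illustrative:

package main

import (
    "fmt"

    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
    res := &runtimeapi.LinuxContainerResources{
        CpuPeriod: 100000, // 100ms scheduling window
        CpuQuota:  50000,  // 50ms of CPU time per window
        HugepageLimits: []*runtimeapi.HugepageLimit{
            // 1GiB of 1GB hugepages, i.e. hugetlb.1GB.limit_in_bytes = 1073741824.
            {PageSize: "1GB", Limit: 1073741824},
        },
    }
    // Effective CPU limit in cores is quota divided by period.
    cores := float64(res.CpuQuota) / float64(res.CpuPeriod)
    fmt.Printf("CPU limit: %.1f cores\n", cores) // 0.5
}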
+
+// SELinuxOption are the labels to be applied to the container.
+message SELinuxOption {
+    string user = 1;
+    string role = 2;
+    string type = 3;
+    string level = 4;
+}
+
+// Capability contains the container capabilities to add or drop.
+// Dropping a capability will drop it from all sets.
+// If a capability is added to only the add_capabilities list then it gets added to permitted,
+// inheritable, effective and bounding sets, i.e. all sets except the ambient set.
+// If a capability is added to only the add_ambient_capabilities list then it gets added to all sets, i.e. permitted,
+// inheritable, effective, bounding and ambient sets.
+// If a capability is added to add_capabilities and add_ambient_capabilities lists then it gets added to all sets, i.e.
+// permitted, inheritable, effective, bounding and ambient sets.
+message Capability {
+    // List of capabilities to add.
+    repeated string add_capabilities = 1;
+    // List of capabilities to drop.
+    repeated string drop_capabilities = 2;
+    // List of ambient capabilities to add.
+    repeated string add_ambient_capabilities = 3;
+}
+
+// LinuxContainerSecurityContext holds linux security configuration that will be applied to a container.
+message LinuxContainerSecurityContext {
+    // Capabilities to add or drop.
+    Capability capabilities = 1;
+    // If set, run container in privileged mode.
+    // Privileged mode is incompatible with the following options. If
+    // privileged is set, the following features MAY have no effect:
+    // 1. capabilities
+    // 2. selinux_options
+    // 3. seccomp
+    // 4. apparmor
+    //
+    // Privileged mode implies the following specific options are applied:
+    // 1. All capabilities are added.
+    // 2. Sensitive paths, such as kernel module paths within sysfs, are not masked.
+    // 3. Any sysfs and procfs mounts are mounted RW.
+    // 4. AppArmor confinement is not applied.
+    // 5. Seccomp restrictions are not applied.
+    // 6. The device cgroup does not restrict access to any devices.
+    // 7. All devices from the host's /dev are available within the container.
+    // 8. SELinux restrictions are not applied (e.g. label=disabled).
+    bool privileged = 2;
+    // Configurations for the container's namespaces.
+    // Only used if the container uses namespaces for isolation.
+    NamespaceOption namespace_options = 3;
+    // SELinux context to be optionally applied.
+    SELinuxOption selinux_options = 4;
+    // UID to run the container process as. Only one of run_as_user and
+    // run_as_username can be specified at a time.
+    Int64Value run_as_user = 5;
+    // GID to run the container process as. run_as_group should only be specified
+    // when run_as_user or run_as_username is specified; otherwise, the runtime
+    // MUST error.
+    Int64Value run_as_group = 12;
+    // User name to run the container process as. If specified, the user MUST
+    // exist in the container image (i.e. in the /etc/passwd inside the image),
+    // and be resolved there by the runtime; otherwise, the runtime MUST error.
+    string run_as_username = 6;
+    // If set, the root filesystem of the container is read-only.
+    bool readonly_rootfs = 7;
+    // List of groups applied to the first process run in the container, in
+    // addition to the container's primary GID, and group memberships defined
+    // in the container image for the container's primary UID of the container process.
+    // If the list is empty, no additional groups are added to any container.
+    // Note that group memberships defined in the container image for the container's primary UID
+    // of the container process are still effective, even if they are not included in this list.
+    repeated int64 supplemental_groups = 8;
+    // no_new_privs defines if the flag for no_new_privs should be set on the
+    // container.
+    bool no_new_privs = 11;
+    // masked_paths is a slice of paths that should be masked by the container
+    // runtime; this can be passed directly to the OCI spec.
+    repeated string masked_paths = 13;
+    // readonly_paths is a slice of paths that should be set as readonly by the
+    // container runtime; this can be passed directly to the OCI spec.
+    repeated string readonly_paths = 14;
+    // Seccomp profile for the container.
+    SecurityProfile seccomp = 15;
+    // AppArmor profile for the container.
+    SecurityProfile apparmor = 16;
+    // AppArmor profile for the container, candidate values are:
+    // * runtime/default: equivalent to not specifying a profile.
+    // * unconfined: no profiles are loaded.
+    // * localhost/<profile_name>: profile loaded on the node
+    //   (localhost) by name. The possible profile names are detailed at
+    //   https://gitlab.com/apparmor/apparmor/-/wikis/AppArmor_Core_Policy_Reference
+    string apparmor_profile = 9 [deprecated=true];
+    // Seccomp profile for the container, candidate values are:
+    // * runtime/default: the default profile for the container runtime
+    // * unconfined: unconfined profile, ie, no seccomp sandboxing
+    // * localhost/<full-path-to-profile>: the profile installed on the node.
+    //   <full-path-to-profile> is the full path of the profile.
+    // Default: "", which is identical to unconfined.
+    string seccomp_profile_path = 10 [deprecated=true];
+}
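A minimal sketch of requesting confinement through the typed SecurityProfile fields rather than the deprecated apparmor_profile/seccomp_profile_path strings; the localhost profile path is an illustrative assumption:

package main

import (
    "fmt"

    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
    sc := &runtimeapi.LinuxContainerSecurityContext{
        // Localhost profiles must set localhost_ref; for seccomp this is an
        // absolute path to the profile on the node (illustrative path below).
        Seccomp: &runtimeapi.SecurityProfile{
            ProfileType:  runtimeapi.SecurityProfile_Localhost,
            LocalhostRef: "/var/lib/kubelet/seccomp/profile.json",
        },
        // RuntimeDefault needs no localhost_ref.
        Apparmor: &runtimeapi.SecurityProfile{ProfileType: runtimeapi.SecurityProfile_RuntimeDefault},
    }
    fmt.Println(sc.Seccomp.ProfileType, sc.Apparmor.ProfileType)
}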
+
+// LinuxContainerConfig contains platform-specific configuration for
+// Linux-based containers.
+message LinuxContainerConfig {
+    // Resources specification for the container.
+    LinuxContainerResources resources = 1;
+    // LinuxContainerSecurityContext configuration for the container.
+    LinuxContainerSecurityContext security_context = 2;
+}
+
+// WindowsNamespaceOption provides options for Windows namespaces.
+message WindowsNamespaceOption {
+    // Network namespace for this container/sandbox.
+    // Namespaces currently set by the kubelet: POD, NODE
+    NamespaceMode network = 1;
+}
+
+// WindowsSandboxSecurityContext holds platform-specific configurations that will be
+// applied to a sandbox.
+// These settings will only apply to the sandbox container.
+message WindowsSandboxSecurityContext {
+    // User name to run the container process as. If specified, the user MUST
+    // exist in the container image and be resolved there by the runtime;
+    // otherwise, the runtime MUST return an error.
+    string run_as_username = 1;
+
+    // The contents of the GMSA credential spec to use to run this container.
+    string credential_spec = 2;
+
+    // Indicates whether the container requested to run as a HostProcess container.
+    bool host_process = 3;
+
+    // Configuration for the sandbox's namespaces.
+    WindowsNamespaceOption namespace_options = 4;
+}
+
+// WindowsPodSandboxConfig holds platform-specific configurations for Windows
+// host platforms and Windows-based containers.
+message WindowsPodSandboxConfig {
+    // WindowsSandboxSecurityContext holds sandbox security attributes.
+    WindowsSandboxSecurityContext security_context = 1;
+}
+
+// WindowsContainerSecurityContext holds windows security configuration that will be applied to a container.
+message WindowsContainerSecurityContext {
+    // User name to run the container process as. If specified, the user MUST
+    // exist in the container image and be resolved there by the runtime;
+    // otherwise, the runtime MUST return an error.
+    string run_as_username = 1;
+
+    // The contents of the GMSA credential spec to use to run this container.
+    string credential_spec = 2;
+
+    // Indicates whether a container is to be run as a HostProcess container.
+    bool host_process = 3;
+}
+
+// WindowsContainerConfig contains platform-specific configuration for
+// Windows-based containers.
+message WindowsContainerConfig {
+    // Resources specification for the container.
+    WindowsContainerResources resources = 1;
+    // WindowsContainerSecurityContext configuration for the container.
+    WindowsContainerSecurityContext security_context = 2;
+}
+
+// WindowsContainerResources specifies Windows specific configuration for
+// resources.
+message WindowsContainerResources {
+    // CPU shares (relative weight vs. other containers). Default: 0 (not specified).
+    int64 cpu_shares = 1;
+    // Number of CPUs available to the container. Default: 0 (not specified).
+    int64 cpu_count = 2;
+    // Specifies the portion of processor cycles that this container can use as a percentage times 100.
+    int64 cpu_maximum = 3;
+    // Memory limit in bytes. Default: 0 (not specified).
+    int64 memory_limit_in_bytes = 4;
+    // Specifies the size of the rootfs / scratch space in bytes to be configured for this container. Default: 0 (not specified).
+    int64 rootfs_size_in_bytes = 5;
+}
+
+// ContainerMetadata holds all necessary information for building the container
+// name. The container runtime is encouraged to expose the metadata in its user
+// interface for better user experience. E.g., the runtime can construct a unique
+// container name based on the metadata. Note that (name, attempt) is unique
+// within a sandbox for the entire lifetime of the sandbox.
+message ContainerMetadata {
+    // Name of the container. Same as the container name in the PodSpec.
+    string name = 1;
+    // Attempt number of creating the container. Default: 0.
+    uint32 attempt = 2;
+}
+
+// Device specifies a host device to mount into a container.
+message Device {
+    // Path of the device within the container.
+    string container_path = 1;
+    // Path of the device on the host.
+    string host_path = 2;
+    // Cgroups permissions of the device; candidates are one or more of:
+    // * r - allows the container to read from the specified device.
+    // * w - allows the container to write to the specified device.
+    // * m - allows the container to create device files that do not yet exist.
+    string permissions = 3;
+}
+
+// CDIDevice specifies a CDI device information.
+message CDIDevice {
+    // Fully qualified CDI device name,
+    // for example: vendor.com/gpu=gpudevice1.
+    // See more details in the CDI specification:
+    // https://github.com/container-orchestrated-devices/container-device-interface/blob/main/SPEC.md
+    string name = 1;
+}
+
+// ContainerConfig holds all the required and optional fields for creating a
+// container.
+message ContainerConfig {
+    // Metadata of the container. This information will uniquely identify the
+    // container, and the runtime should leverage this to ensure correct
+    // operation. The runtime may also use this information to improve UX, such
+    // as by constructing a readable name.
+    ContainerMetadata metadata = 1;
+    // Image to use.
+    ImageSpec image = 2;
+    // Command to execute (i.e., entrypoint for docker).
+    repeated string command = 3;
+    // Args for the Command (i.e., command for docker).
+    repeated string args = 4;
+    // Current working directory of the command.
+    string working_dir = 5;
+    // List of environment variables to set in the container.
+    repeated KeyValue envs = 6;
+    // Mounts for the container.
+    repeated Mount mounts = 7;
+    // Devices for the container.
+    repeated Device devices = 8;
+    // Key-value pairs that may be used to scope and select individual resources.
+    // Label keys are of the form:
+    //     label-key ::= prefixed-name | name
+    //     prefixed-name ::= prefix '/' name
+    //     prefix ::= DNS_SUBDOMAIN
+    //     name ::= DNS_LABEL
+    map<string, string> labels = 9;
+    // Unstructured key-value map that may be used by the kubelet to store and
+    // retrieve arbitrary metadata.
+    //
+    // Annotations MUST NOT be altered by the runtime; the annotations stored
+    // here MUST be returned in the ContainerStatus associated with the container
+    // this ContainerConfig creates.
+    //
+    // In general, in order to preserve a well-defined interface between the
+    // kubelet and the container runtime, annotations SHOULD NOT influence
+    // runtime behaviour.
+    map<string, string> annotations = 10;
+    // Path relative to PodSandboxConfig.LogDirectory for container to store
+    // the log (STDOUT and STDERR) on the host.
+    // E.g.,
+    //     PodSandboxConfig.LogDirectory = `/var/log/pods/<podNamespace>_<podName>_<podUID>/`
+    //     ContainerConfig.LogPath = `containerName/Instance#.log`
+    string log_path = 11;
+
+    // Variables for interactive containers; these have very specialized
+    // use-cases (e.g. debugging).
+    bool stdin = 12;
+    bool stdin_once = 13;
+    bool tty = 14;
+
+    // Configuration specific to Linux containers.
+    LinuxContainerConfig linux = 15;
+    // Configuration specific to Windows containers.
+    WindowsContainerConfig windows = 16;
+
+    // CDI devices for the container.
+    repeated CDIDevice CDI_devices = 17;
+}
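A minimal sketch of a ContainerConfig using the fields above; image, command, and log path values are illustrative:

package main

import (
    "fmt"

    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
    cfg := &runtimeapi.ContainerConfig{
        // (name, attempt) must be unique within the sandbox.
        Metadata: &runtimeapi.ContainerMetadata{Name: "web", Attempt: 0},
        Image:    &runtimeapi.ImageSpec{Image: "nginx:1.25"},
        Command:  []string{"nginx"},
        Args:     []string{"-g", "daemon off;"},
        // Relative to PodSandboxConfig.LogDirectory, per the comment above.
        LogPath: "web/0.log",
        Envs:    []*runtimeapi.KeyValue{{Key: "TZ", Value: "UTC"}},
    }
    fmt.Println(cfg.Metadata.Name)
}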
+
+message CreateContainerRequest {
+    // ID of the PodSandbox in which the container should be created.
+    string pod_sandbox_id = 1;
+    // Config of the container.
+    ContainerConfig config = 2;
+    // Config of the PodSandbox. This is the same config that was passed
+    // to RunPodSandboxRequest to create the PodSandbox. It is passed again
+    // here just for easy reference. The PodSandboxConfig is immutable and
+    // remains the same throughout the lifetime of the pod.
+    PodSandboxConfig sandbox_config = 3;
+}
+
+message CreateContainerResponse {
+    // ID of the created container.
+    string container_id = 1;
+}
+
+message StartContainerRequest {
+    // ID of the container to start.
+    string container_id = 1;
+}
+
+message StartContainerResponse {}
+
+message StopContainerRequest {
+    // ID of the container to stop.
+    string container_id = 1;
+    // Timeout in seconds to wait for the container to stop before forcibly
+    // terminating it. Default: 0 (forcibly terminate the container immediately).
+    int64 timeout = 2;
+}
+
+message StopContainerResponse {}
+
+message RemoveContainerRequest {
+    // ID of the container to remove.
+    string container_id = 1;
+}
+
+message RemoveContainerResponse {}
+
+enum ContainerState {
+    CONTAINER_CREATED = 0;
+    CONTAINER_RUNNING = 1;
+    CONTAINER_EXITED = 2;
+    CONTAINER_UNKNOWN = 3;
+}
+
+// ContainerStateValue is the wrapper of ContainerState.
+message ContainerStateValue {
+    // State of the container.
+    ContainerState state = 1;
+}
+
+// ContainerFilter is used to filter containers.
+// All those fields are combined with 'AND'.
+message ContainerFilter {
+    // ID of the container.
+    string id = 1;
+    // State of the container.
+    ContainerStateValue state = 2;
+    // ID of the PodSandbox.
+    string pod_sandbox_id = 3;
+    // LabelSelector to select matches.
+    // Only api.MatchLabels is supported for now and the requirements
+    // are ANDed. MatchExpressions is not supported yet.
+    map<string, string> label_selector = 4;
+}
+
+message ListContainersRequest {
+    ContainerFilter filter = 1;
+}
+
+// Container provides the runtime information for a container, such as ID, hash,
+// state of the container.
+message Container {
+    // ID of the container, used by the container runtime to identify
+    // a container.
+    string id = 1;
+    // ID of the sandbox to which this container belongs.
+    string pod_sandbox_id = 2;
+    // Metadata of the container.
+    ContainerMetadata metadata = 3;
+    // Spec of the image.
+    ImageSpec image = 4;
+    // Reference to the image in use. For most runtimes, this should be an
+    // image ID.
+    string image_ref = 5;
+    // State of the container.
+    ContainerState state = 6;
+    // Creation time of the container in nanoseconds.
+    int64 created_at = 7;
+    // Key-value pairs that may be used to scope and select individual resources.
+    map<string, string> labels = 8;
+    // Unstructured key-value map holding arbitrary metadata.
+    // Annotations MUST NOT be altered by the runtime; the value of this field
+    // MUST be identical to that of the corresponding ContainerConfig used to
+    // instantiate this Container.
+    map<string, string> annotations = 9;
+}
+
+message ListContainersResponse {
+    // List of containers.
+    repeated Container containers = 1;
+}
+
+message ContainerStatusRequest {
+    // ID of the container for which to retrieve status.
+    string container_id = 1;
+    // Verbose indicates whether to return extra information about the container.
+    bool verbose = 2;
+}
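A hedged sketch of the create/start/stop/remove flow these messages describe, using the generated RuntimeServiceClient; the socket address is an assumption and error handling is trimmed for brevity:

package main

import (
    "context"
    "log"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
    // CRI runtimes commonly listen on a unix socket; this path is illustrative.
    conn, err := grpc.Dial("unix:///run/containerd/containerd.sock",
        grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := runtimeapi.NewRuntimeServiceClient(conn)
    ctx := context.Background()

    // sandboxID, cfg, and sandboxCfg are assumed to come from an earlier
    // RunPodSandbox call; they are left zero-valued in this sketch.
    var sandboxID string
    var cfg *runtimeapi.ContainerConfig
    var sandboxCfg *runtimeapi.PodSandboxConfig

    created, err := client.CreateContainer(ctx, &runtimeapi.CreateContainerRequest{
        PodSandboxId: sandboxID, Config: cfg, SandboxConfig: sandboxCfg,
    })
    if err != nil {
        log.Fatal(err)
    }
    client.StartContainer(ctx, &runtimeapi.StartContainerRequest{ContainerId: created.ContainerId})
    // Give the container 10s to stop gracefully before it is killed.
    client.StopContainer(ctx, &runtimeapi.StopContainerRequest{ContainerId: created.ContainerId, Timeout: 10})
    client.RemoveContainer(ctx, &runtimeapi.RemoveContainerRequest{ContainerId: created.ContainerId})
}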
+
+// ContainerStatus represents the status of a container.
+message ContainerStatus {
+    // ID of the container.
+    string id = 1;
+    // Metadata of the container.
+    ContainerMetadata metadata = 2;
+    // Status of the container.
+    ContainerState state = 3;
+    // Creation time of the container in nanoseconds.
+    int64 created_at = 4;
+    // Start time of the container in nanoseconds. Default: 0 (not specified).
+    int64 started_at = 5;
+    // Finish time of the container in nanoseconds. Default: 0 (not specified).
+    int64 finished_at = 6;
+    // Exit code of the container. Only required when finished_at != 0. Default: 0.
+    int32 exit_code = 7;
+    // Spec of the image.
+    ImageSpec image = 8;
+    // Reference to the image in use. For most runtimes, this should be an
+    // image ID.
+    string image_ref = 9;
+    // Brief CamelCase string explaining why the container is in its current state.
+    // Must be set to "OOMKilled" for containers terminated by the cgroup-based Out-of-Memory killer.
+    string reason = 10;
+    // Human-readable message indicating details about why the container is in its
+    // current state.
+    string message = 11;
+    // Key-value pairs that may be used to scope and select individual resources.
+    map<string, string> labels = 12;
+    // Unstructured key-value map holding arbitrary metadata.
+    // Annotations MUST NOT be altered by the runtime; the value of this field
+    // MUST be identical to that of the corresponding ContainerConfig used to
+    // instantiate the Container this status represents.
+    map<string, string> annotations = 13;
+    // Mounts for the container.
+    repeated Mount mounts = 14;
+    // Log path of the container.
+    string log_path = 15;
+    // Resource limits configuration of the container.
+    ContainerResources resources = 16;
+}
+
+message ContainerStatusResponse {
+    // Status of the container.
+    ContainerStatus status = 1;
+    // Info is extra information of the Container. The key could be an arbitrary string, and
+    // the value should be in JSON format. The information could include anything useful for
+    // debugging, e.g. the pid for a Linux-container-based container runtime.
+    // It should only be returned non-empty when Verbose is true.
+    map<string, string> info = 2;
+}
+
+// ContainerResources holds resource limits configuration for a container.
+message ContainerResources {
+    // Resource limits configuration specific to Linux containers.
+    LinuxContainerResources linux = 1;
+    // Resource limits configuration specific to Windows containers.
+    WindowsContainerResources windows = 2;
+}
+
+message UpdateContainerResourcesRequest {
+    // ID of the container to update.
+    string container_id = 1;
+    // Resource configuration specific to Linux containers.
+    LinuxContainerResources linux = 2;
+    // Resource configuration specific to Windows containers.
+    WindowsContainerResources windows = 3;
+    // Unstructured key-value map holding arbitrary additional information for
+    // container resources updating. This can be used for specifying experimental
+    // resources to update or other options to use when updating the container.
+    map<string, string> annotations = 4;
+}
+
+message UpdateContainerResourcesResponse {}
+
+message ExecSyncRequest {
+    // ID of the container.
+    string container_id = 1;
+    // Command to execute.
+    repeated string cmd = 2;
+    // Timeout in seconds to stop the command. Default: 0 (run forever).
+    int64 timeout = 3;
+}
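A small helper sketch reflecting the exit_code contract above (only meaningful when finished_at != 0); the helper name and sample values are illustrative:

package main

import (
    "fmt"

    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// exited reports whether the container finished and, if so, its exit code.
func exited(s *runtimeapi.ContainerStatus) (bool, int32) {
    if s.State != runtimeapi.ContainerState_CONTAINER_EXITED || s.FinishedAt == 0 {
        return false, 0
    }
    return true, s.ExitCode
}

func main() {
    s := &runtimeapi.ContainerStatus{
        State:      runtimeapi.ContainerState_CONTAINER_EXITED,
        FinishedAt: 1700000000000000000, // nanoseconds; illustrative
        ExitCode:   137,
        Reason:     "OOMKilled",
    }
    fmt.Println(exited(s)) // true 137
}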
+
+message ExecSyncResponse {
+    // Captured command stdout output.
+    // The runtime should cap the output of this response to 16MB.
+    // If the stdout of the command produces more than 16MB, the remaining output
+    // should be discarded, and the command should proceed with no error.
+    // See CVE-2022-1708 and CVE-2022-31030 for more information.
+    bytes stdout = 1;
+    // Captured command stderr output.
+    // The runtime should cap the output of this response to 16MB.
+    // If the stderr of the command produces more than 16MB, the remaining output
+    // should be discarded, and the command should proceed with no error.
+    // See CVE-2022-1708 and CVE-2022-31030 for more information.
+    bytes stderr = 2;
+    // Exit code the command finished with. Default: 0 (success).
+    int32 exit_code = 3;
+}
+
+message ExecRequest {
+    // ID of the container in which to execute the command.
+    string container_id = 1;
+    // Command to execute.
+    repeated string cmd = 2;
+    // Whether to exec the command in a TTY.
+    bool tty = 3;
+    // Whether to stream stdin.
+    // One of `stdin`, `stdout`, and `stderr` MUST be true.
+    bool stdin = 4;
+    // Whether to stream stdout.
+    // One of `stdin`, `stdout`, and `stderr` MUST be true.
+    bool stdout = 5;
+    // Whether to stream stderr.
+    // One of `stdin`, `stdout`, and `stderr` MUST be true.
+    // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported
+    // in this case. The output of stdout and stderr will be combined to a
+    // single stream.
+    bool stderr = 6;
+}
+
+message ExecResponse {
+    // Fully qualified URL of the exec streaming server.
+    string url = 1;
+}
+
+message AttachRequest {
+    // ID of the container to which to attach.
+    string container_id = 1;
+    // Whether to stream stdin.
+    // One of `stdin`, `stdout`, and `stderr` MUST be true.
+    bool stdin = 2;
+    // Whether the process being attached is running in a TTY.
+    // This must match the TTY setting in the ContainerConfig.
+    bool tty = 3;
+    // Whether to stream stdout.
+    // One of `stdin`, `stdout`, and `stderr` MUST be true.
+    bool stdout = 4;
+    // Whether to stream stderr.
+    // One of `stdin`, `stdout`, and `stderr` MUST be true.
+    // If `tty` is true, `stderr` MUST be false. Multiplexing is not supported
+    // in this case. The output of stdout and stderr will be combined to a
+    // single stream.
+    bool stderr = 5;
+}
+
+message AttachResponse {
+    // Fully qualified URL of the attach streaming server.
+    string url = 1;
+}
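A sketch of a validator for the stream invariants spelled out in ExecRequest and AttachRequest; validateExec is an illustrative helper, not part of the CRI:

package main

import (
    "errors"
    "fmt"

    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// validateExec enforces the invariants above: at least one stream must be
// requested, and stderr cannot be streamed when a TTY is in use.
func validateExec(r *runtimeapi.ExecRequest) error {
    if !r.Stdin && !r.Stdout && !r.Stderr {
        return errors.New("one of stdin, stdout and stderr must be true")
    }
    if r.Tty && r.Stderr {
        return errors.New("stderr is not multiplexed when tty is true")
    }
    return nil
}

func main() {
    fmt.Println(validateExec(&runtimeapi.ExecRequest{Cmd: []string{"ls"}, Tty: true, Stdout: true}))
}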
+
+message PortForwardRequest {
+    // ID of the pod sandbox to which to forward the port.
+    string pod_sandbox_id = 1;
+    // Port to forward.
+    repeated int32 port = 2;
+}
+
+message PortForwardResponse {
+    // Fully qualified URL of the port-forward streaming server.
+    string url = 1;
+}
+
+message ImageFilter {
+    // Spec of the image.
+    ImageSpec image = 1;
+}
+
+message ListImagesRequest {
+    // Filter to list images.
+    ImageFilter filter = 1;
+}
+
+// Basic information about a container image.
+message Image {
+    // ID of the image.
+    string id = 1;
+    // Other names by which this image is known.
+    repeated string repo_tags = 2;
+    // Digests by which this image is known.
+    repeated string repo_digests = 3;
+    // Size of the image in bytes. Must be > 0.
+    uint64 size = 4;
+    // UID that will run the command(s). This is used as a default if no user is
+    // specified when creating the container. UID and the following user name
+    // are mutually exclusive.
+    Int64Value uid = 5;
+    // User name that will run the command(s). This is used if UID is not set
+    // and no user is specified when creating the container.
+    string username = 6;
+    // ImageSpec for the image, which includes annotations.
+    ImageSpec spec = 7;
+    // Recommendation on whether this image should be exempt from garbage collection.
+    // It must only be treated as a recommendation -- the client can still request that the image be deleted,
+    // and the runtime must oblige.
+    bool pinned = 8;
+}
+
+message ListImagesResponse {
+    // List of images.
+    repeated Image images = 1;
+}
+
+message ImageStatusRequest {
+    // Spec of the image.
+    ImageSpec image = 1;
+    // Verbose indicates whether to return extra information about the image.
+    bool verbose = 2;
+}
+
+message ImageStatusResponse {
+    // Status of the image.
+    Image image = 1;
+    // Info is extra information of the Image. The key could be an arbitrary string, and
+    // the value should be in JSON format. The information could include anything useful
+    // for debugging, e.g. the image config for an OCI-image-based container runtime.
+    // It should only be returned non-empty when Verbose is true.
+    map<string, string> info = 2;
+}
+
+// AuthConfig contains authorization information for connecting to a registry.
+message AuthConfig {
+    string username = 1;
+    string password = 2;
+    string auth = 3;
+    string server_address = 4;
+    // IdentityToken is used to authenticate the user and get
+    // an access token for the registry.
+    string identity_token = 5;
+    // RegistryToken is a bearer token to be sent to a registry.
+    string registry_token = 6;
+}
+
+message PullImageRequest {
+    // Spec of the image.
+    ImageSpec image = 1;
+    // Authentication configuration for pulling the image.
+    AuthConfig auth = 2;
+    // Config of the PodSandbox, which is used to pull the image in the PodSandbox context.
+    PodSandboxConfig sandbox_config = 3;
+}
+
+message PullImageResponse {
+    // Reference to the image in use. For most runtimes, this should be an
+    // image ID or digest.
+    string image_ref = 1;
+}
+
+message RemoveImageRequest {
+    // Spec of the image to remove.
+    ImageSpec image = 1;
+}
+
+message RemoveImageResponse {}
+
+message NetworkConfig {
+    // CIDR to use for pod IP addresses. If the CIDR is empty, runtimes
+    // should omit it.
+    string pod_cidr = 1;
+}
+
+message RuntimeConfig {
+    NetworkConfig network_config = 1;
+}
+
+message UpdateRuntimeConfigRequest {
+    RuntimeConfig runtime_config = 1;
+}
+
+message UpdateRuntimeConfigResponse {}
+
+// RuntimeCondition contains condition information for the runtime.
+// There are 2 kinds of runtime conditions:
+// 1. Required conditions: Conditions are required for the kubelet to work
+// properly. If any required condition is unmet, the node will not be ready.
+// The required conditions include:
+// * RuntimeReady: RuntimeReady means the runtime is up and ready to accept
+//   basic containers, e.g. a container that only needs host network.
+// * NetworkReady: NetworkReady means the runtime network is up and ready to
+//   accept containers which require container network.
+// 2. Optional conditions: Conditions are informative to the user, but the kubelet
+// will not rely on them. Since the condition type is an arbitrary string, all conditions
+// not required are optional. These conditions will be exposed to users to help
+// them understand the status of the system.
+message RuntimeCondition {
+    // Type of runtime condition.
+    string type = 1;
+    // Status of the condition, one of true/false. Default: false.
+    bool status = 2;
+    // Brief CamelCase string containing the reason for the condition's last transition.
+    string reason = 3;
+    // Human-readable message indicating details about the last transition.
+    string message = 4;
+}
+
+// RuntimeStatus is information about the current status of the runtime.
+message RuntimeStatus {
+    // List of current observed runtime conditions.
+    repeated RuntimeCondition conditions = 1;
+}
+
+message StatusRequest {
+    // Verbose indicates whether to return extra information about the runtime.
+    bool verbose = 1;
+}
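A short sketch of checking the two required conditions from RuntimeCondition; it reuses the RuntimeReady/NetworkReady constants defined later in this patch (constants.go):

package main

import (
    "fmt"

    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// runtimeHealthy reports whether both required conditions are met.
func runtimeHealthy(st *runtimeapi.RuntimeStatus) bool {
    ready := map[string]bool{}
    for _, c := range st.Conditions {
        ready[c.Type] = c.Status
    }
    return ready[runtimeapi.RuntimeReady] && ready[runtimeapi.NetworkReady]
}

func main() {
    st := &runtimeapi.RuntimeStatus{Conditions: []*runtimeapi.RuntimeCondition{
        {Type: runtimeapi.RuntimeReady, Status: true},
        {Type: runtimeapi.NetworkReady, Status: false, Reason: "NetworkPluginNotReady"},
    }}
    fmt.Println(runtimeHealthy(st)) // false
}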
+
+message StatusResponse {
+    // Status of the Runtime.
+    RuntimeStatus status = 1;
+    // Info is extra information of the Runtime. The key could be an arbitrary string, and
+    // the value should be in JSON format. The information could include anything useful for
+    // debugging, e.g. the plugins used by the container runtime.
+    // It should only be returned non-empty when Verbose is true.
+    map<string, string> info = 2;
+}
+
+message ImageFsInfoRequest {}
+
+// UInt64Value is the wrapper of uint64.
+message UInt64Value {
+    // The value.
+    uint64 value = 1;
+}
+
+// FilesystemIdentifier uniquely identifies the filesystem.
+message FilesystemIdentifier {
+    // Mountpoint of a filesystem.
+    string mountpoint = 1;
+}
+
+// FilesystemUsage provides the filesystem usage information.
+message FilesystemUsage {
+    // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+    int64 timestamp = 1;
+    // The unique identifier of the filesystem.
+    FilesystemIdentifier fs_id = 2;
+    // UsedBytes represents the bytes used for images on the filesystem.
+    // This may differ from the total bytes used on the filesystem and may not
+    // equal CapacityBytes - AvailableBytes.
+    UInt64Value used_bytes = 3;
+    // InodesUsed represents the inodes used by the images.
+    // This may not equal InodesCapacity - InodesAvailable because the underlying
+    // filesystem may also be used for purposes other than storing images.
+    UInt64Value inodes_used = 4;
+}
+
+// WindowsFilesystemUsage provides the filesystem usage information specific to Windows.
+message WindowsFilesystemUsage {
+    // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+    int64 timestamp = 1;
+    // The unique identifier of the filesystem.
+    FilesystemIdentifier fs_id = 2;
+    // UsedBytes represents the bytes used for images on the filesystem.
+    // This may differ from the total bytes used on the filesystem and may not
+    // equal CapacityBytes - AvailableBytes.
+    UInt64Value used_bytes = 3;
+}
+
+message ImageFsInfoResponse {
+    // Information of image filesystem(s).
+    repeated FilesystemUsage image_filesystems = 1;
+    // Information of container filesystem(s).
+    // This is an optional field; it may be used, for example, if container and image
+    // storage are separated.
+    // The default is to return this as empty.
+    repeated FilesystemUsage container_filesystems = 2;
+}
+
+message ContainerStatsRequest {
+    // ID of the container for which to retrieve stats.
+    string container_id = 1;
+}
+
+message ContainerStatsResponse {
+    // Stats of the container.
+    ContainerStats stats = 1;
+}
+
+message ListContainerStatsRequest {
+    // Filter for the list request.
+    ContainerStatsFilter filter = 1;
+}
+
+// ContainerStatsFilter is used to filter containers.
+// All those fields are combined with 'AND'.
+message ContainerStatsFilter {
+    // ID of the container.
+    string id = 1;
+    // ID of the PodSandbox.
+    string pod_sandbox_id = 2;
+    // LabelSelector to select matches.
+    // Only api.MatchLabels is supported for now and the requirements
+    // are ANDed. MatchExpressions is not supported yet.
+    map<string, string> label_selector = 3;
+}
+
+message ListContainerStatsResponse {
+    // Stats of the container.
+    repeated ContainerStats stats = 1;
+}
+
+// ContainerAttributes provides basic information of the container.
+message ContainerAttributes {
+    // ID of the container.
+    string id = 1;
+    // Metadata of the container.
+    ContainerMetadata metadata = 2;
+    // Key-value pairs that may be used to scope and select individual resources.
+    map<string, string> labels = 3;
+    // Unstructured key-value map holding arbitrary metadata.
+    // Annotations MUST NOT be altered by the runtime; the value of this field
+    // MUST be identical to that of the corresponding ContainerConfig used to
+    // instantiate the Container this status represents.
+    map<string, string> annotations = 4;
+}
+
+// ContainerStats provides the resource usage statistics for a container.
+message ContainerStats {
+    // Information of the container.
+    ContainerAttributes attributes = 1;
+    // CPU usage gathered from the container.
+    CpuUsage cpu = 2;
+    // Memory usage gathered from the container.
+    MemoryUsage memory = 3;
+    // Usage of the writable layer.
+    FilesystemUsage writable_layer = 4;
+    // Swap usage gathered from the container.
+    SwapUsage swap = 5;
+}
+
+// WindowsContainerStats provides the resource usage statistics for a container specific for Windows.
+message WindowsContainerStats {
+    // Information of the container.
+    ContainerAttributes attributes = 1;
+    // CPU usage gathered from the container.
+    WindowsCpuUsage cpu = 2;
+    // Memory usage gathered from the container.
+    WindowsMemoryUsage memory = 3;
+    // Usage of the writable layer.
+    WindowsFilesystemUsage writable_layer = 4;
+}
+
+// CpuUsage provides the CPU usage information.
+message CpuUsage {
+    // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+    int64 timestamp = 1;
+    // Cumulative CPU usage (sum across all cores) since object creation.
+    UInt64Value usage_core_nano_seconds = 2;
+    // Total CPU usage (sum of all cores) averaged over the sample window.
+    // The "core" unit can be interpreted as CPU core-nanoseconds per second.
+    UInt64Value usage_nano_cores = 3;
+}
+
+// WindowsCpuUsage provides the CPU usage information specific to Windows.
+message WindowsCpuUsage {
+    // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+    int64 timestamp = 1;
+    // Cumulative CPU usage (sum across all cores) since object creation.
+    UInt64Value usage_core_nano_seconds = 2;
+    // Total CPU usage (sum of all cores) averaged over the sample window.
+    // The "core" unit can be interpreted as CPU core-nanoseconds per second.
+    UInt64Value usage_nano_cores = 3;
+}
+
+// MemoryUsage provides the memory usage information.
+message MemoryUsage {
+    // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+    int64 timestamp = 1;
+    // The amount of working set memory in bytes.
+    UInt64Value working_set_bytes = 2;
+    // Available memory for use. This is defined as the memory limit - workingSetBytes.
+    UInt64Value available_bytes = 3;
+    // Total memory in use. This includes all memory regardless of when it was accessed.
+    UInt64Value usage_bytes = 4;
+    // The amount of anonymous and swap cache memory (includes transparent hugepages).
+    UInt64Value rss_bytes = 5;
+    // Cumulative number of minor page faults.
+    UInt64Value page_faults = 6;
+    // Cumulative number of major page faults.
+    UInt64Value major_page_faults = 7;
+}
+
+message SwapUsage {
+    // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+    int64 timestamp = 1;
+    // Available swap for use. This is defined as the swap limit - swapUsageBytes.
+    UInt64Value swap_available_bytes = 2;
+    // Total swap in use. This includes all swap regardless of when it was accessed.
+    UInt64Value swap_usage_bytes = 3;
+}
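A sketch of how usage_nano_cores relates to usage_core_nano_seconds, per the CpuUsage comments above: the averaged value is the delta of core-nanoseconds divided by the sample window in seconds. nanoCores is an illustrative helper:

package main

import (
    "fmt"

    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// nanoCores derives usage_nano_cores from two cumulative CpuUsage samples.
func nanoCores(prev, cur *runtimeapi.CpuUsage) uint64 {
    dt := cur.Timestamp - prev.Timestamp // nanoseconds between samples
    if dt <= 0 {
        return 0
    }
    du := cur.UsageCoreNanoSeconds.Value - prev.UsageCoreNanoSeconds.Value
    return uint64(float64(du) / (float64(dt) / 1e9))
}

func main() {
    prev := &runtimeapi.CpuUsage{Timestamp: 1e9, UsageCoreNanoSeconds: &runtimeapi.UInt64Value{Value: 0}}
    cur := &runtimeapi.CpuUsage{Timestamp: 2e9, UsageCoreNanoSeconds: &runtimeapi.UInt64Value{Value: 5e8}}
    fmt.Println(nanoCores(prev, cur)) // 500000000, i.e. half a core
}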
+
+// WindowsMemoryUsage provides the memory usage information specific to Windows.
+message WindowsMemoryUsage {
+    // Timestamp in nanoseconds at which the information was collected. Must be > 0.
+    int64 timestamp = 1;
+    // The amount of working set memory in bytes.
+    UInt64Value working_set_bytes = 2;
+    // Available memory for use. This is defined as the memory limit - commit_memory_bytes.
+    UInt64Value available_bytes = 3;
+    // Cumulative number of page faults.
+    UInt64Value page_faults = 4;
+    // Total commit memory in use. Commit memory is the total of physical and virtual memory in use.
+    UInt64Value commit_memory_bytes = 5;
+}
+
+message ReopenContainerLogRequest {
+    // ID of the container for which to reopen the log.
+    string container_id = 1;
+}
+
+message ReopenContainerLogResponse {}
+
+message CheckpointContainerRequest {
+    // ID of the container to be checkpointed.
+    string container_id = 1;
+    // Location of the checkpoint archive used for export.
+    string location = 2;
+    // Timeout in seconds for the checkpoint to complete.
+    // Timeout of zero means to use the CRI default.
+    // Timeout > 0 means to use the user-specified timeout.
+    int64 timeout = 3;
+}
+
+message CheckpointContainerResponse {}
+
+message GetEventsRequest {}
+
+message ContainerEventResponse {
+    // ID of the container.
+    string container_id = 1;
+
+    // Type of the container event.
+    ContainerEventType container_event_type = 2;
+
+    // Creation timestamp of this event.
+    int64 created_at = 3;
+
+    // Sandbox status.
+    PodSandboxStatus pod_sandbox_status = 4;
+
+    // Container statuses.
+    repeated ContainerStatus containers_statuses = 5;
+}
+
+enum ContainerEventType {
+    // Container created.
+    CONTAINER_CREATED_EVENT = 0;
+
+    // Container started.
+    CONTAINER_STARTED_EVENT = 1;
+
+    // Container stopped.
+    CONTAINER_STOPPED_EVENT = 2;
+
+    // Container deleted.
+    CONTAINER_DELETED_EVENT = 3;
+}
+
+message ListMetricDescriptorsRequest {}
+
+message ListMetricDescriptorsResponse {
+    repeated MetricDescriptor descriptors = 1;
+}
+
+message MetricDescriptor {
+    // The name field will be used as a unique identifier of this MetricDescriptor,
+    // and be used in conjunction with the Metric structure to populate the full Metric.
+    string name = 1;
+    string help = 2;
+    // When a metric uses this metric descriptor, it should only define
+    // labels that have previously been declared in label_keys.
+    // It is the responsibility of the runtime to keep the keys and values consistently ordered.
+    // If the two slices have different lengths, the behavior is undefined.
+    repeated string label_keys = 3;
+}
+
+message ListPodSandboxMetricsRequest {}
+
+message ListPodSandboxMetricsResponse {
+    repeated PodSandboxMetrics pod_metrics = 1;
+}
+
+message PodSandboxMetrics {
+    string pod_sandbox_id = 1;
+    repeated Metric metrics = 2;
+    repeated ContainerMetrics container_metrics = 3;
+}
+
+message ContainerMetrics {
+    string container_id = 1;
+    repeated Metric metrics = 2;
+}
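A brief sketch of the descriptor/metric pairing described above: a Metric's label_values must line up index-by-index with its descriptor's label_keys. The metric name and labels are illustrative:

package main

import (
    "fmt"

    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
    // Keys are declared once in the descriptor...
    desc := &runtimeapi.MetricDescriptor{
        Name:      "container_network_receive_errors_total", // illustrative name
        Help:      "Cumulative count of receive errors.",
        LabelKeys: []string{"interface"},
    }
    // ...and each Metric supplies values in the same order and length.
    m := &runtimeapi.Metric{
        Name:        desc.Name,
        Timestamp:   0, // 0 means the metric was gathered live
        LabelValues: []string{"eth0"},
        Value:       &runtimeapi.UInt64Value{Value: 3},
    }
    for i, k := range desc.LabelKeys {
        fmt.Printf("%s=%s\n", k, m.LabelValues[i])
    }
}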
+
+message Metric {
+    // Name must match a name previously returned in a MetricDescriptors call;
+    // otherwise, it will be ignored.
+    string name = 1;
+    // Timestamp should be 0 if the metric was gathered live.
+    // If it was cached, the Timestamp should reflect the time it was collected.
+    int64 timestamp = 2;
+    MetricType metric_type = 3;
+    // The corresponding LabelValues to the LabelKeys defined in the MetricDescriptor.
+    // It is the responsibility of the runtime to keep the keys and values consistently ordered.
+    // If the two slices have different lengths, the behavior is undefined.
+    repeated string label_values = 4;
+    UInt64Value value = 5;
+}
+
+enum MetricType {
+    COUNTER = 0;
+    GAUGE = 1;
+}
+
+message RuntimeConfigRequest {}
+
+message RuntimeConfigResponse {
+    // Configuration information for Linux-based runtimes. This field contains
+    // global runtime configuration options that are not specific to runtime
+    // handlers.
+    LinuxRuntimeConfiguration linux = 1;
+}
+
+message LinuxRuntimeConfiguration {
+    // Cgroup driver to use.
+    // Note: this field should not change for the lifecycle of the Kubelet,
+    // or while there are running containers.
+    // The Kubelet will not re-request this after startup, and will construct the cgroup
+    // hierarchy assuming it is static.
+    // If the runtime wishes to change this value, it must be accompanied by removal of
+    // all pods, and a restart of the Kubelet. The easiest way to do this is with a full node reboot.
+    CgroupDriver cgroup_driver = 1;
+}
+
+enum CgroupDriver {
+    SYSTEMD = 0;
+    CGROUPFS = 1;
+}
+
diff --git a/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/constants.go b/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/constants.go
new file mode 100644
index 0000000000..6f9ad59eb5
--- /dev/null
+++ b/vendor/k8s.io/cri-api/pkg/apis/runtime/v1/constants.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// This file contains all constants defined in CRI.
+
+// Required runtime condition type.
+const (
+    // RuntimeReady means the runtime is up and ready to accept basic containers.
+    RuntimeReady = "RuntimeReady"
+    // NetworkReady means the runtime network is up and ready to accept containers which require network.
+    NetworkReady = "NetworkReady"
+)
+
+// LogStreamType is the type of the stream in CRI container log.
+type LogStreamType string
+
+const (
+    // Stdout is the stream type for stdout.
+    Stdout LogStreamType = "stdout"
+    // Stderr is the stream type for stderr.
+    Stderr LogStreamType = "stderr"
+)
+
+// LogTag is the tag of a log line in CRI container log.
+// Currently defined log tags:
+// * First tag: Partial/Full - P/F.
+// The field in the container log format can be extended to include multiple
+// tags by using a delimiter, but changes should be rare. If it becomes clear
+// that better extensibility is desired, a more extensible format (e.g., json)
+// should be adopted as a replacement and/or addition.
+type LogTag string
+
+const (
+    // LogTagPartial means the line is part of multiple lines.
+    LogTagPartial LogTag = "P"
+    // LogTagFull means the line is a single full line or the end of multiple lines.
+    LogTagFull LogTag = "F"
+    // LogTagDelimiter is the delimiter for different log tags.
+    LogTagDelimiter = ":"
+)
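The constants above define the stream and tag fields of the CRI container log format, where a line looks like `2016-10-06T00:17:09.669794202Z stdout F some output`. A hedged parsing sketch (parseLine is an illustrative helper; multiple tags may be joined with LogTagDelimiter):

package main

import (
    "fmt"
    "strings"
)

// parseLine splits one CRI container log line of the form
// `<RFC3339Nano timestamp> <stream> <tag(s)> <log>`.
func parseLine(line string) (timestamp, stream, tag, msg string, err error) {
    parts := strings.SplitN(line, " ", 4)
    if len(parts) != 4 {
        return "", "", "", "", fmt.Errorf("malformed log line: %q", line)
    }
    return parts[0], parts[1], parts[2], parts[3], nil
}

func main() {
    ts, stream, tag, msg, _ := parseLine("2016-10-06T00:17:09.669794202Z stdout F hello")
    fmt.Println(ts, stream, tag, msg) // tag "F" is LogTagFull
}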
diff --git a/vendor/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go b/vendor/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go
new file mode 100644
index 0000000000..e6c6b131b5
--- /dev/null
+++ b/vendor/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go
@@ -0,0 +1,256 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+    "context"
+    "sync"
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+
+    runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+)
+
+// FakeImageService fakes the image service.
+type FakeImageService struct {
+    sync.Mutex
+
+    FakeImageSize uint64
+    Called        []string
+    Errors        map[string][]error
+    Images        map[string]*runtimeapi.Image
+    Pinned        map[string]bool
+
+    pulledImages []*pulledImage
+
+    FakeFilesystemUsage          []*runtimeapi.FilesystemUsage
+    FakeContainerFilesystemUsage []*runtimeapi.FilesystemUsage
+}
+
+// SetFakeImages sets the list of fake images for the FakeImageService.
+func (r *FakeImageService) SetFakeImages(images []string) {
+    r.Lock()
+    defer r.Unlock()
+
+    r.Images = make(map[string]*runtimeapi.Image)
+    for _, image := range images {
+        r.Images[image] = r.makeFakeImage(
+            &runtimeapi.ImageSpec{
+                Image:       image,
+                Annotations: make(map[string]string)})
+    }
+}
+
+// SetFakeImagesWithAnnotations sets the list of fake images for the FakeImageService with annotations.
+func (r *FakeImageService) SetFakeImagesWithAnnotations(imageSpecs []*runtimeapi.ImageSpec) {
+    r.Lock()
+    defer r.Unlock()
+
+    r.Images = make(map[string]*runtimeapi.Image)
+    for _, imageSpec := range imageSpecs {
+        r.Images[imageSpec.Image] = r.makeFakeImage(imageSpec)
+    }
+}
+
+// SetFakeImageSize sets the image size for the FakeImageService.
+func (r *FakeImageService) SetFakeImageSize(size uint64) {
+    r.Lock()
+    defer r.Unlock()
+
+    r.FakeImageSize = size
+}
+
+// SetFakeImagePinned sets the image Pinned field for one image.
+func (r *FakeImageService) SetFakeImagePinned(image string, pinned bool) {
+    r.Lock()
+    defer r.Unlock()
+
+    if r.Pinned == nil {
+        r.Pinned = make(map[string]bool)
+    }
+    r.Pinned[image] = pinned
+}
+
+// SetFakeFilesystemUsage sets the image FilesystemUsage for the FakeImageService.
+func (r *FakeImageService) SetFakeFilesystemUsage(usage []*runtimeapi.FilesystemUsage) {
+    r.Lock()
+    defer r.Unlock()
+
+    r.FakeFilesystemUsage = usage
+}
+
+// SetFakeContainerFilesystemUsage sets the container FilesystemUsage for the FakeImageService.
+func (r *FakeImageService) SetFakeContainerFilesystemUsage(usage []*runtimeapi.FilesystemUsage) {
+    r.Lock()
+    defer r.Unlock()
+
+    r.FakeContainerFilesystemUsage = usage
+}
+
+// NewFakeImageService creates a new FakeImageService.
+func NewFakeImageService() *FakeImageService {
+    return &FakeImageService{
+        Called: make([]string, 0),
+        Errors: make(map[string][]error),
+        Images: make(map[string]*runtimeapi.Image),
+    }
+}
+
+func (r *FakeImageService) makeFakeImage(image *runtimeapi.ImageSpec) *runtimeapi.Image {
+    return &runtimeapi.Image{
+        Id:       image.Image,
+        Size_:    r.FakeImageSize,
+        Spec:     image,
+        RepoTags: []string{image.Image},
+        Pinned:   r.Pinned[image.Image],
+    }
+}
+
+// stringInSlice returns true if s is in list.
+func stringInSlice(s string, list []string) bool {
+    for _, v := range list {
+        if v == s {
+            return true
+        }
+    }
+
+    return false
+}
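A small sketch of how these setters are typically used from a test; the test itself is an illustrative assumption, not part of this package:

package testing_test

import (
    "context"
    "testing"

    critesting "k8s.io/cri-api/pkg/apis/testing"
)

func TestListFakeImages(t *testing.T) {
    svc := critesting.NewFakeImageService()
    svc.SetFakeImages([]string{"busybox:1.36", "nginx:1.25"})

    images, err := svc.ListImages(context.Background(), nil)
    if err != nil {
        t.Fatal(err)
    }
    if len(images) != 2 {
        t.Fatalf("expected 2 images, got %d", len(images))
    }
}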
+
+// InjectError sets the error message for the FakeImageService.
+func (r *FakeImageService) InjectError(f string, err error) {
+    r.Lock()
+    defer r.Unlock()
+    r.Errors[f] = append(r.Errors[f], err)
+}
+
+// caller of popError must grab a lock.
+func (r *FakeImageService) popError(f string) error {
+    if r.Errors == nil {
+        return nil
+    }
+    errs := r.Errors[f]
+    if len(errs) == 0 {
+        return nil
+    }
+    err, errs := errs[0], errs[1:]
+    r.Errors[f] = errs
+    return err
+}
+
+// ListImages returns the list of images from the FakeImageService, or an error if one was previously injected.
+func (r *FakeImageService) ListImages(_ context.Context, filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
+    r.Lock()
+    defer r.Unlock()
+
+    r.Called = append(r.Called, "ListImages")
+    if err := r.popError("ListImages"); err != nil {
+        return nil, err
+    }
+
+    images := make([]*runtimeapi.Image, 0)
+    for _, img := range r.Images {
+        if filter != nil && filter.Image != nil {
+            if !stringInSlice(filter.Image.Image, img.RepoTags) {
+                continue
+            }
+        }
+
+        images = append(images, img)
+    }
+    return images, nil
+}
+
+// ImageStatus returns the status of the image from the FakeImageService.
+func (r *FakeImageService) ImageStatus(_ context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) {
+    r.Lock()
+    defer r.Unlock()
+
+    r.Called = append(r.Called, "ImageStatus")
+    if err := r.popError("ImageStatus"); err != nil {
+        return nil, err
+    }
+
+    return &runtimeapi.ImageStatusResponse{Image: r.Images[image.Image]}, nil
+}
+
+// PullImage emulates pulling an image through the FakeImageService.
+func (r *FakeImageService) PullImage(_ context.Context, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
+    r.Lock()
+    defer r.Unlock()
+
+    r.Called = append(r.Called, "PullImage")
+    if err := r.popError("PullImage"); err != nil {
+        return "", err
+    }
+
+    r.pulledImages = append(r.pulledImages, &pulledImage{imageSpec: image, authConfig: auth})
+    // The ImageID should be randomized for a real container runtime, but here we just use
+    // the image's name to make fake images easy to construct.
+    imageID := image.Image
+    if _, ok := r.Images[imageID]; !ok {
+        r.Images[imageID] = r.makeFakeImage(image)
+    }
+
+    return imageID, nil
+}
+
+// RemoveImage removes an image from the FakeImageService.
+func (r *FakeImageService) RemoveImage(_ context.Context, image *runtimeapi.ImageSpec) error {
+    r.Lock()
+    defer r.Unlock()
+
+    r.Called = append(r.Called, "RemoveImage")
+    if err := r.popError("RemoveImage"); err != nil {
+        return err
+    }
+
+    // Remove the image.
+    delete(r.Images, image.Image)
+
+    return nil
+}
+
+// ImageFsInfo returns information about the filesystem that is used to store images.
+func (r *FakeImageService) ImageFsInfo(_ context.Context) (*runtimeapi.ImageFsInfoResponse, error) {
+    r.Lock()
+    defer r.Unlock()
+
+    r.Called = append(r.Called, "ImageFsInfo")
+    if err := r.popError("ImageFsInfo"); err != nil {
+        return nil, err
+    }
+
+    return &runtimeapi.ImageFsInfoResponse{
+        ImageFilesystems:     r.FakeFilesystemUsage,
+        ContainerFilesystems: r.FakeContainerFilesystemUsage,
+    }, nil
+}
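A sketch of the InjectError/popError pattern above: each injected error is consumed by exactly one matching call, after which the queue is drained. The test is illustrative:

package testing_test

import (
    "context"
    "errors"
    "testing"

    critesting "k8s.io/cri-api/pkg/apis/testing"
)

func TestInjectedListError(t *testing.T) {
    svc := critesting.NewFakeImageService()
    svc.InjectError("ListImages", errors.New("boom"))

    // The first matching call pops the injected error...
    if _, err := svc.ListImages(context.Background(), nil); err == nil {
        t.Fatal("expected the injected error")
    }
    // ...and the next call succeeds because the queue is empty again.
    if _, err := svc.ListImages(context.Background(), nil); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
}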
+func (r *FakeImageService) AssertImagePulledWithAuth(t *testing.T, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, failMsg string) { + r.Lock() + defer r.Unlock() + expected := &pulledImage{imageSpec: image, authConfig: auth} + assert.Contains(t, r.pulledImages, expected, failMsg) +} + +type pulledImage struct { + imageSpec *runtimeapi.ImageSpec + authConfig *runtimeapi.AuthConfig +} diff --git a/vendor/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go b/vendor/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go new file mode 100644 index 0000000000..332aa71d52 --- /dev/null +++ b/vendor/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go @@ -0,0 +1,796 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "context" + "fmt" + "reflect" + "sync" + "time" + + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" +) + +var ( + // FakeVersion is a version of a fake runtime. + FakeVersion = "0.1.0" + + // FakeRuntimeName is the name of the fake runtime. + FakeRuntimeName = "fakeRuntime" + + // FakePodSandboxIPs is an IP address of the fake runtime. + FakePodSandboxIPs = []string{"192.168.192.168"} +) + +// FakePodSandbox is the fake implementation of runtimeapi.PodSandboxStatus. +type FakePodSandbox struct { + // PodSandboxStatus contains the runtime information for a sandbox. + runtimeapi.PodSandboxStatus + // RuntimeHandler is the runtime handler that was issued with the RunPodSandbox request. + RuntimeHandler string +} + +// FakeContainer is a fake container. +type FakeContainer struct { + // ContainerStatus contains the runtime information for a container. + runtimeapi.ContainerStatus + + // LinuxResources contains the resources specific to linux containers. + LinuxResources *runtimeapi.LinuxContainerResources + + // the sandbox id of this container + SandboxID string +} + +// FakeRuntimeService is a fake runetime service. +type FakeRuntimeService struct { + sync.Mutex + + Called []string + Errors map[string][]error + + FakeStatus *runtimeapi.RuntimeStatus + Containers map[string]*FakeContainer + Sandboxes map[string]*FakePodSandbox + FakeContainerStats map[string]*runtimeapi.ContainerStats + FakePodSandboxStats map[string]*runtimeapi.PodSandboxStats + FakePodSandboxMetrics map[string]*runtimeapi.PodSandboxMetrics + FakeMetricDescriptors map[string]*runtimeapi.MetricDescriptor + FakeContainerMetrics map[string]*runtimeapi.ContainerMetrics + FakeLinuxConfiguration *runtimeapi.LinuxRuntimeConfiguration + + ErrorOnSandboxCreate bool +} + +// GetContainerID returns the unique container ID from the FakeRuntimeService. 
+func (r *FakeRuntimeService) GetContainerID(sandboxID, name string, attempt uint32) (string, error) { + r.Lock() + defer r.Unlock() + + for id, c := range r.Containers { + if c.SandboxID == sandboxID && c.Metadata.Name == name && c.Metadata.Attempt == attempt { + return id, nil + } + } + return "", fmt.Errorf("container (name, attempt, sandboxID)=(%q, %d, %q) not found", name, attempt, sandboxID) +} + +// SetFakeSandboxes sets the fake sandboxes for the FakeRuntimeService. +func (r *FakeRuntimeService) SetFakeSandboxes(sandboxes []*FakePodSandbox) { + r.Lock() + defer r.Unlock() + + r.Sandboxes = make(map[string]*FakePodSandbox) + for _, sandbox := range sandboxes { + sandboxID := sandbox.Id + r.Sandboxes[sandboxID] = sandbox + } +} + +// SetFakeContainers sets fake containers for the FakeRuntimeService. +func (r *FakeRuntimeService) SetFakeContainers(containers []*FakeContainer) { + r.Lock() + defer r.Unlock() + + r.Containers = make(map[string]*FakeContainer) + for _, c := range containers { + containerID := c.Id + r.Containers[containerID] = c + } + +} + +// AssertCalls validates whether specified calls were made to the FakeRuntimeService. +func (r *FakeRuntimeService) AssertCalls(calls []string) error { + r.Lock() + defer r.Unlock() + + if !reflect.DeepEqual(calls, r.Called) { + return fmt.Errorf("expected %#v, got %#v", calls, r.Called) + } + return nil +} + +// GetCalls returns the list of calls made to the FakeRuntimeService. +func (r *FakeRuntimeService) GetCalls() []string { + r.Lock() + defer r.Unlock() + return append([]string{}, r.Called...) +} + +// InjectError inject the error to the next call to the FakeRuntimeService. +func (r *FakeRuntimeService) InjectError(f string, err error) { + r.Lock() + defer r.Unlock() + r.Errors[f] = append(r.Errors[f], err) +} + +// caller of popError must grab a lock. +func (r *FakeRuntimeService) popError(f string) error { + if r.Errors == nil { + return nil + } + errs := r.Errors[f] + if len(errs) == 0 { + return nil + } + err, errs := errs[0], errs[1:] + r.Errors[f] = errs + return err +} + +// NewFakeRuntimeService creates a new FakeRuntimeService. +func NewFakeRuntimeService() *FakeRuntimeService { + return &FakeRuntimeService{ + Called: make([]string, 0), + Errors: make(map[string][]error), + Containers: make(map[string]*FakeContainer), + Sandboxes: make(map[string]*FakePodSandbox), + FakeContainerStats: make(map[string]*runtimeapi.ContainerStats), + FakePodSandboxStats: make(map[string]*runtimeapi.PodSandboxStats), + FakePodSandboxMetrics: make(map[string]*runtimeapi.PodSandboxMetrics), + FakeContainerMetrics: make(map[string]*runtimeapi.ContainerMetrics), + } +} + +// Version returns version information from the FakeRuntimeService. +func (r *FakeRuntimeService) Version(_ context.Context, apiVersion string) (*runtimeapi.VersionResponse, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "Version") + if err := r.popError("Version"); err != nil { + return nil, err + } + + return &runtimeapi.VersionResponse{ + Version: FakeVersion, + RuntimeName: FakeRuntimeName, + RuntimeVersion: FakeVersion, + RuntimeApiVersion: FakeVersion, + }, nil +} + +// Status returns runtime status of the FakeRuntimeService. 
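Because every method appends its name to Called before checking for injected errors, call-order assertions are cheap. A hypothetical example, ahead of the Status method below:

func TestVersionCallTracking(t *testing.T) {
	fake := NewFakeRuntimeService()

	if _, err := fake.Version(context.Background(), "v1"); err != nil {
		t.Fatal(err)
	}

	// AssertCalls compares the exact, ordered call history.
	if err := fake.AssertCalls([]string{"Version"}); err != nil {
		t.Fatal(err)
	}
}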
+func (r *FakeRuntimeService) Status(_ context.Context, verbose bool) (*runtimeapi.StatusResponse, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "Status") + if err := r.popError("Status"); err != nil { + return nil, err + } + + return &runtimeapi.StatusResponse{Status: r.FakeStatus}, nil +} + +// RunPodSandbox emulates the run of the pod sandbox in the FakeRuntimeService. +func (r *FakeRuntimeService) RunPodSandbox(_ context.Context, config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "RunPodSandbox") + if err := r.popError("RunPodSandbox"); err != nil { + return "", err + } + + if r.ErrorOnSandboxCreate { + return "", fmt.Errorf("error on sandbox create") + } + + // PodSandboxID should be randomized for real container runtime, but here just use + // fixed name from BuildSandboxName() for easily making fake sandboxes. + podSandboxID := BuildSandboxName(config.Metadata) + createdAt := time.Now().UnixNano() + r.Sandboxes[podSandboxID] = &FakePodSandbox{ + PodSandboxStatus: runtimeapi.PodSandboxStatus{ + Id: podSandboxID, + Metadata: config.Metadata, + State: runtimeapi.PodSandboxState_SANDBOX_READY, + CreatedAt: createdAt, + Network: &runtimeapi.PodSandboxNetworkStatus{ + Ip: FakePodSandboxIPs[0], + }, + // Without setting sandboxStatus's Linux.Namespaces.Options, kubeGenericRuntimeManager's podSandboxChanged will consider it as network + // namespace changed and always recreate sandbox which causes pod creation failed. + // Ref `sandboxStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() != networkNamespaceForPod(pod)` in podSandboxChanged function. + Linux: &runtimeapi.LinuxPodSandboxStatus{ + Namespaces: &runtimeapi.Namespace{ + Options: config.GetLinux().GetSecurityContext().GetNamespaceOptions(), + }, + }, + Labels: config.Labels, + Annotations: config.Annotations, + RuntimeHandler: runtimeHandler, + }, + RuntimeHandler: runtimeHandler, + } + // assign additional IPs + additionalIPs := FakePodSandboxIPs[1:] + additionalPodIPs := make([]*runtimeapi.PodIP, 0, len(additionalIPs)) + for _, ip := range additionalIPs { + additionalPodIPs = append(additionalPodIPs, &runtimeapi.PodIP{ + Ip: ip, + }) + } + r.Sandboxes[podSandboxID].PodSandboxStatus.Network.AdditionalIps = additionalPodIPs + return podSandboxID, nil +} + +// StopPodSandbox emulates the stop of pod sandbox in the FakeRuntimeService. +func (r *FakeRuntimeService) StopPodSandbox(_ context.Context, podSandboxID string) error { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "StopPodSandbox") + if err := r.popError("StopPodSandbox"); err != nil { + return err + } + + if s, ok := r.Sandboxes[podSandboxID]; ok { + s.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY + } else { + return fmt.Errorf("pod sandbox %s not found", podSandboxID) + } + + return nil +} + +// RemovePodSandbox emulates removal of the pod sadbox in the FakeRuntimeService. +func (r *FakeRuntimeService) RemovePodSandbox(_ context.Context, podSandboxID string) error { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "RemovePodSandbox") + if err := r.popError("RemovePodSandbox"); err != nil { + return err + } + + // Remove the pod sandbox + delete(r.Sandboxes, podSandboxID) + + return nil +} + +// PodSandboxStatus returns pod sandbox status from the FakeRuntimeService. 
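A sketch of the sandbox lifecycle as the fake models it, placed before the status accessor below; names are illustrative:

func TestSandboxLifecycle(t *testing.T) {
	fake := NewFakeRuntimeService()
	config := &runtimeapi.PodSandboxConfig{
		Metadata: &runtimeapi.PodSandboxMetadata{
			Name: "pod", Namespace: "ns", Uid: "uid", Attempt: 0,
		},
	}

	// The fake derives a deterministic ID from the metadata: "pod_ns_uid_0".
	id, err := fake.RunPodSandbox(context.Background(), config, "runc")
	if err != nil {
		t.Fatal(err)
	}

	// Stopping only flips the state; the sandbox stays listed until removed.
	if err := fake.StopPodSandbox(context.Background(), id); err != nil {
		t.Fatal(err)
	}
	if got := fake.Sandboxes[id].State; got != runtimeapi.PodSandboxState_SANDBOX_NOTREADY {
		t.Fatalf("unexpected state: %v", got)
	}
}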
+func (r *FakeRuntimeService) PodSandboxStatus(_ context.Context, podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "PodSandboxStatus") + if err := r.popError("PodSandboxStatus"); err != nil { + return nil, err + } + + s, ok := r.Sandboxes[podSandboxID] + if !ok { + return nil, fmt.Errorf("pod sandbox %q not found", podSandboxID) + } + + status := s.PodSandboxStatus + return &runtimeapi.PodSandboxStatusResponse{Status: &status}, nil +} + +// ListPodSandbox returns the list of pod sandboxes in the FakeRuntimeService. +func (r *FakeRuntimeService) ListPodSandbox(_ context.Context, filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "ListPodSandbox") + if err := r.popError("ListPodSandbox"); err != nil { + return nil, err + } + + result := make([]*runtimeapi.PodSandbox, 0) + for id, s := range r.Sandboxes { + if filter != nil { + if filter.Id != "" && filter.Id != id { + continue + } + if filter.State != nil && filter.GetState().State != s.State { + continue + } + if filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, s.GetLabels()) { + continue + } + } + + result = append(result, &runtimeapi.PodSandbox{ + Id: s.Id, + Metadata: s.Metadata, + State: s.State, + CreatedAt: s.CreatedAt, + Labels: s.Labels, + Annotations: s.Annotations, + RuntimeHandler: s.RuntimeHandler, + }) + } + + return result, nil +} + +// PortForward emulates the set up of port forward in the FakeRuntimeService. +func (r *FakeRuntimeService) PortForward(context.Context, *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "PortForward") + if err := r.popError("PortForward"); err != nil { + return nil, err + } + + return &runtimeapi.PortForwardResponse{}, nil +} + +// CreateContainer emulates container creation in the FakeRuntimeService. +func (r *FakeRuntimeService) CreateContainer(_ context.Context, podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "CreateContainer") + if err := r.popError("CreateContainer"); err != nil { + return "", err + } + + // ContainerID should be randomized for real container runtime, but here just use + // fixed BuildContainerName() for easily making fake containers. + containerID := BuildContainerName(config.Metadata, podSandboxID) + createdAt := time.Now().UnixNano() + createdState := runtimeapi.ContainerState_CONTAINER_CREATED + imageRef := config.Image.Image + r.Containers[containerID] = &FakeContainer{ + ContainerStatus: runtimeapi.ContainerStatus{ + Id: containerID, + Metadata: config.Metadata, + Image: config.Image, + ImageRef: imageRef, + CreatedAt: createdAt, + State: createdState, + Labels: config.Labels, + Annotations: config.Annotations, + }, + SandboxID: podSandboxID, + LinuxResources: config.GetLinux().GetResources(), + } + + return containerID, nil +} + +// StartContainer emulates start of a container in the FakeRuntimeService. 
+func (r *FakeRuntimeService) StartContainer(_ context.Context, containerID string) error { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "StartContainer") + if err := r.popError("StartContainer"); err != nil { + return err + } + + c, ok := r.Containers[containerID] + if !ok { + return fmt.Errorf("container %s not found", containerID) + } + + // Set container to running. + c.State = runtimeapi.ContainerState_CONTAINER_RUNNING + c.StartedAt = time.Now().UnixNano() + + return nil +} + +// StopContainer emulates stop of a container in the FakeRuntimeService. +func (r *FakeRuntimeService) StopContainer(_ context.Context, containerID string, timeout int64) error { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "StopContainer") + if err := r.popError("StopContainer"); err != nil { + return err + } + + c, ok := r.Containers[containerID] + if !ok { + return fmt.Errorf("container %q not found", containerID) + } + + // Set container to exited state. + finishedAt := time.Now().UnixNano() + exitedState := runtimeapi.ContainerState_CONTAINER_EXITED + c.State = exitedState + c.FinishedAt = finishedAt + + return nil +} + +// RemoveContainer emulates remove of a container in the FakeRuntimeService. +func (r *FakeRuntimeService) RemoveContainer(_ context.Context, containerID string) error { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "RemoveContainer") + if err := r.popError("RemoveContainer"); err != nil { + return err + } + + // Remove the container + delete(r.Containers, containerID) + + return nil +} + +// ListContainers returns the list of containers in the FakeRuntimeService. +func (r *FakeRuntimeService) ListContainers(_ context.Context, filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "ListContainers") + if err := r.popError("ListContainers"); err != nil { + return nil, err + } + + result := make([]*runtimeapi.Container, 0) + for _, s := range r.Containers { + if filter != nil { + if filter.Id != "" && filter.Id != s.Id { + continue + } + if filter.PodSandboxId != "" && filter.PodSandboxId != s.SandboxID { + continue + } + if filter.State != nil && filter.GetState().State != s.State { + continue + } + if filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, s.GetLabels()) { + continue + } + } + + result = append(result, &runtimeapi.Container{ + Id: s.Id, + CreatedAt: s.CreatedAt, + PodSandboxId: s.SandboxID, + Metadata: s.Metadata, + State: s.State, + Image: s.Image, + ImageRef: s.ImageRef, + Labels: s.Labels, + Annotations: s.Annotations, + }) + } + + return result, nil +} + +// ContainerStatus returns the container status given the container ID in FakeRuntimeService. +func (r *FakeRuntimeService) ContainerStatus(_ context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "ContainerStatus") + if err := r.popError("ContainerStatus"); err != nil { + return nil, err + } + + c, ok := r.Containers[containerID] + if !ok { + return nil, fmt.Errorf("container %q not found", containerID) + } + + status := c.ContainerStatus + return &runtimeapi.ContainerStatusResponse{Status: &status}, nil +} + +// UpdateContainerResources returns the container resource in the FakeRuntimeService. 
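The container path mirrors the sandbox one. An illustrative lifecycle test, ahead of the resource-update stub below:

func TestContainerLifecycle(t *testing.T) {
	fake := NewFakeRuntimeService()
	config := &runtimeapi.ContainerConfig{
		Metadata: &runtimeapi.ContainerMetadata{Name: "app", Attempt: 0},
		Image:    &runtimeapi.ImageSpec{Image: "registry.example/app:v1"},
	}

	id, err := fake.CreateContainer(context.Background(), "sandbox-1", config, nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := fake.StartContainer(context.Background(), id); err != nil {
		t.Fatal(err)
	}

	// Filter on the running state to confirm the transition took effect.
	running := runtimeapi.ContainerState_CONTAINER_RUNNING
	list, err := fake.ListContainers(context.Background(), &runtimeapi.ContainerFilter{
		State: &runtimeapi.ContainerStateValue{State: running},
	})
	if err != nil {
		t.Fatal(err)
	}
	if len(list) != 1 || list[0].Id != id {
		t.Fatalf("expected one running container %q, got %v", id, list)
	}
}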
+func (r *FakeRuntimeService) UpdateContainerResources(context.Context, string, *runtimeapi.ContainerResources) error { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "UpdateContainerResources") + return r.popError("UpdateContainerResources") +} + +// ExecSync emulates the sync execution of a command in a container in the FakeRuntimeService. +func (r *FakeRuntimeService) ExecSync(_ context.Context, containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "ExecSync") + err = r.popError("ExecSync") + return +} + +// Exec emulates the execution of a command in a container in the FakeRuntimeService. +func (r *FakeRuntimeService) Exec(context.Context, *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "Exec") + if err := r.popError("Exec"); err != nil { + return nil, err + } + + return &runtimeapi.ExecResponse{}, nil +} + +// Attach emulates the attach request in the FakeRuntimeService. +func (r *FakeRuntimeService) Attach(_ context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "Attach") + if err := r.popError("Attach"); err != nil { + return nil, err + } + + return &runtimeapi.AttachResponse{}, nil +} + +// UpdateRuntimeConfig emulates the update of a runtime config for the FakeRuntimeService. +func (r *FakeRuntimeService) UpdateRuntimeConfig(_ context.Context, runtimeCOnfig *runtimeapi.RuntimeConfig) error { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "UpdateRuntimeConfig") + return r.popError("UpdateRuntimeConfig") +} + +// SetFakeContainerStats sets the fake container stats in the FakeRuntimeService. +func (r *FakeRuntimeService) SetFakeContainerStats(containerStats []*runtimeapi.ContainerStats) { + r.Lock() + defer r.Unlock() + + r.FakeContainerStats = make(map[string]*runtimeapi.ContainerStats) + for _, s := range containerStats { + r.FakeContainerStats[s.Attributes.Id] = s + } +} + +// ContainerStats returns the container stats in the FakeRuntimeService. +func (r *FakeRuntimeService) ContainerStats(_ context.Context, containerID string) (*runtimeapi.ContainerStats, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "ContainerStats") + if err := r.popError("ContainerStats"); err != nil { + return nil, err + } + + s, found := r.FakeContainerStats[containerID] + if !found { + return nil, fmt.Errorf("no stats for container %q", containerID) + } + return s, nil +} + +// ListContainerStats returns the list of all container stats given the filter in the FakeRuntimeService. 
+func (r *FakeRuntimeService) ListContainerStats(_ context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "ListContainerStats") + if err := r.popError("ListContainerStats"); err != nil { + return nil, err + } + + var result []*runtimeapi.ContainerStats + for _, c := range r.Containers { + if filter != nil { + if filter.Id != "" && filter.Id != c.Id { + continue + } + if filter.PodSandboxId != "" && filter.PodSandboxId != c.SandboxID { + continue + } + if filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, c.GetLabels()) { + continue + } + } + s, found := r.FakeContainerStats[c.Id] + if !found { + continue + } + result = append(result, s) + } + + return result, nil +} + +// SetFakePodSandboxStats sets the fake pod sandbox stats in the FakeRuntimeService. +func (r *FakeRuntimeService) SetFakePodSandboxStats(podStats []*runtimeapi.PodSandboxStats) { + r.Lock() + defer r.Unlock() + + r.FakePodSandboxStats = make(map[string]*runtimeapi.PodSandboxStats) + for _, s := range podStats { + r.FakePodSandboxStats[s.Attributes.Id] = s + } +} + +// PodSandboxStats returns the sandbox stats in the FakeRuntimeService. +func (r *FakeRuntimeService) PodSandboxStats(_ context.Context, podSandboxID string) (*runtimeapi.PodSandboxStats, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "PodSandboxStats") + if err := r.popError("PodSandboxStats"); err != nil { + return nil, err + } + + s, found := r.FakePodSandboxStats[podSandboxID] + if !found { + return nil, fmt.Errorf("no stats for pod sandbox %q", podSandboxID) + } + return s, nil +} + +// ListPodSandboxStats returns the list of all pod sandbox stats given the filter in the FakeRuntimeService. +func (r *FakeRuntimeService) ListPodSandboxStats(_ context.Context, filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "ListPodSandboxStats") + if err := r.popError("ListPodSandboxStats"); err != nil { + return nil, err + } + + var result []*runtimeapi.PodSandboxStats + for _, sb := range r.Sandboxes { + if filter != nil { + if filter.Id != "" && filter.Id != sb.Id { + continue + } + if filter.LabelSelector != nil && !filterInLabels(filter.LabelSelector, sb.GetLabels()) { + continue + } + } + s, found := r.FakePodSandboxStats[sb.Id] + if !found { + continue + } + result = append(result, s) + } + + return result, nil +} + +// ReopenContainerLog emulates call to the reopen container log in the FakeRuntimeService. +func (r *FakeRuntimeService) ReopenContainerLog(_ context.Context, containerID string) error { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "ReopenContainerLog") + + if err := r.popError("ReopenContainerLog"); err != nil { + return err + } + + return nil +} + +// CheckpointContainer emulates call to checkpoint a container in the FakeRuntimeService. +func (r *FakeRuntimeService) CheckpointContainer(_ context.Context, options *runtimeapi.CheckpointContainerRequest) error { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "CheckpointContainer") + + if err := r.popError("CheckpointContainer"); err != nil { + return err + } + + return nil +} + +func (f *FakeRuntimeService) GetContainerEvents(containerEventsCh chan *runtimeapi.ContainerEventResponse) error { + return nil +} + +// SetFakeMetricDescriptors sets the fake metrics descriptors in the FakeRuntimeService. 
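Stats are looked up strictly by the seeded Attributes.Id, as this hypothetical test shows; the metric-descriptor setter follows:

func TestContainerStatsLookup(t *testing.T) {
	fake := NewFakeRuntimeService()
	fake.SetFakeContainerStats([]*runtimeapi.ContainerStats{
		{Attributes: &runtimeapi.ContainerAttributes{Id: "ctr-1"}},
	})

	if _, err := fake.ContainerStats(context.Background(), "ctr-1"); err != nil {
		t.Fatal(err)
	}
	// An unknown ID is an error rather than an empty result.
	if _, err := fake.ContainerStats(context.Background(), "ctr-2"); err == nil {
		t.Fatal("expected error for unseeded container")
	}
}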
+func (r *FakeRuntimeService) SetFakeMetricDescriptors(descs []*runtimeapi.MetricDescriptor) { + r.Lock() + defer r.Unlock() + + r.FakeMetricDescriptors = make(map[string]*runtimeapi.MetricDescriptor) + for _, d := range descs { + r.FakeMetricDescriptors[d.Name] = d + } +} + +// ListMetricDescriptors gets the descriptors for the metrics that will be returned in ListPodSandboxMetrics. +func (r *FakeRuntimeService) ListMetricDescriptors(_ context.Context) ([]*runtimeapi.MetricDescriptor, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "ListMetricDescriptors") + if err := r.popError("ListMetricDescriptors"); err != nil { + return nil, err + } + + descs := make([]*runtimeapi.MetricDescriptor, 0, len(r.FakeMetricDescriptors)) + for _, d := range r.FakeMetricDescriptors { + descs = append(descs, d) + } + + return descs, nil +} + +// SetFakePodSandboxMetrics sets the fake pod sandbox metrics in the FakeRuntimeService. +func (r *FakeRuntimeService) SetFakePodSandboxMetrics(podStats []*runtimeapi.PodSandboxMetrics) { + r.Lock() + defer r.Unlock() + + r.FakePodSandboxMetrics = make(map[string]*runtimeapi.PodSandboxMetrics) + for _, s := range podStats { + r.FakePodSandboxMetrics[s.PodSandboxId] = s + } +} + +// ListPodSandboxMetrics returns the list of all pod sandbox metrics in the FakeRuntimeService. +func (r *FakeRuntimeService) ListPodSandboxMetrics(_ context.Context) ([]*runtimeapi.PodSandboxMetrics, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "ListPodSandboxMetrics") + if err := r.popError("ListPodSandboxMetrics"); err != nil { + return nil, err + } + + var result []*runtimeapi.PodSandboxMetrics + for _, sb := range r.Sandboxes { + s, found := r.FakePodSandboxMetrics[sb.Id] + if !found { + continue + } + result = append(result, s) + } + + return result, nil +} + +// RuntimeConfig returns runtime configuration of the FakeRuntimeService. +func (r *FakeRuntimeService) RuntimeConfig(_ context.Context) (*runtimeapi.RuntimeConfigResponse, error) { + r.Lock() + defer r.Unlock() + + r.Called = append(r.Called, "RuntimeConfig") + if err := r.popError("RuntimeConfig"); err != nil { + return nil, err + } + + return &runtimeapi.RuntimeConfigResponse{Linux: r.FakeLinuxConfiguration}, nil +} diff --git a/vendor/k8s.io/cri-api/pkg/apis/testing/utils.go b/vendor/k8s.io/cri-api/pkg/apis/testing/utils.go new file mode 100644 index 0000000000..e4b6a8c6ba --- /dev/null +++ b/vendor/k8s.io/cri-api/pkg/apis/testing/utils.go @@ -0,0 +1,48 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" +) + +// BuildContainerName creates a unique container name string. +func BuildContainerName(metadata *runtimeapi.ContainerMetadata, sandboxID string) string { + // include the sandbox ID to make the container ID unique. + return fmt.Sprintf("%s_%s_%d", sandboxID, metadata.Name, metadata.Attempt) +} + +// BuildSandboxName creates a unique sandbox name string. 
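Since the fakes derive IDs from metadata rather than randomizing them, tests can predict names outright; a sketch using BuildContainerName above and the sandbox variant defined just below:

func TestBuildNamesAreDeterministic(t *testing.T) {
	meta := &runtimeapi.ContainerMetadata{Name: "app", Attempt: 1}
	if got := BuildContainerName(meta, "sandbox-1"); got != "sandbox-1_app_1" {
		t.Fatalf("unexpected container name: %s", got)
	}

	podMeta := &runtimeapi.PodSandboxMetadata{
		Name: "pod", Namespace: "ns", Uid: "uid", Attempt: 0,
	}
	if got := BuildSandboxName(podMeta); got != "pod_ns_uid_0" {
		t.Fatalf("unexpected sandbox name: %s", got)
	}
}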
+func BuildSandboxName(metadata *runtimeapi.PodSandboxMetadata) string { + return fmt.Sprintf("%s_%s_%s_%d", metadata.Name, metadata.Namespace, metadata.Uid, metadata.Attempt) +} + +func filterInLabels(filter, labels map[string]string) bool { + for k, v := range filter { + if value, ok := labels[k]; ok { + if value != v { + return false + } + } else { + return false + } + } + + return true +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 31d1e83775..f207bbe765 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -447,13 +447,16 @@ github.com/gobwas/glob/util/strings github.com/gofrs/flock # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 +github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto +github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.3 ## explicit; go 1.9 +github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any @@ -1361,7 +1364,9 @@ golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/socks +golang.org/x/net/internal/timeseries golang.org/x/net/proxy +golang.org/x/net/trace # golang.org/x/oauth2 v0.14.0 ## explicit; go 1.18 golang.org/x/oauth2 @@ -1479,14 +1484,70 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch +# google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 +## explicit; go 1.19 +google.golang.org/genproto/googleapis/rpc/status +# google.golang.org/grpc v1.58.3 +## explicit; go 1.19 +google.golang.org/grpc +google.golang.org/grpc/attributes +google.golang.org/grpc/backoff +google.golang.org/grpc/balancer +google.golang.org/grpc/balancer/base +google.golang.org/grpc/balancer/grpclb/state +google.golang.org/grpc/balancer/roundrobin +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/channelz +google.golang.org/grpc/codes +google.golang.org/grpc/connectivity +google.golang.org/grpc/credentials +google.golang.org/grpc/credentials/insecure +google.golang.org/grpc/encoding +google.golang.org/grpc/encoding/proto +google.golang.org/grpc/grpclog +google.golang.org/grpc/internal +google.golang.org/grpc/internal/backoff +google.golang.org/grpc/internal/balancer/gracefulswitch +google.golang.org/grpc/internal/balancerload +google.golang.org/grpc/internal/binarylog +google.golang.org/grpc/internal/buffer +google.golang.org/grpc/internal/channelz +google.golang.org/grpc/internal/credentials +google.golang.org/grpc/internal/envconfig +google.golang.org/grpc/internal/grpclog +google.golang.org/grpc/internal/grpcrand +google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle +google.golang.org/grpc/internal/metadata +google.golang.org/grpc/internal/pretty +google.golang.org/grpc/internal/resolver +google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/passthrough +google.golang.org/grpc/internal/resolver/unix +google.golang.org/grpc/internal/serviceconfig +google.golang.org/grpc/internal/status +google.golang.org/grpc/internal/syscall +google.golang.org/grpc/internal/transport +google.golang.org/grpc/internal/transport/networktype +google.golang.org/grpc/keepalive 
+google.golang.org/grpc/metadata +google.golang.org/grpc/peer +google.golang.org/grpc/resolver +google.golang.org/grpc/serviceconfig +google.golang.org/grpc/stats +google.golang.org/grpc/status +google.golang.org/grpc/tap # google.golang.org/protobuf v1.31.0 ## explicit; go 1.11 +google.golang.org/protobuf/encoding/protojson google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire google.golang.org/protobuf/internal/descfmt google.golang.org/protobuf/internal/descopts google.golang.org/protobuf/internal/detrand google.golang.org/protobuf/internal/encoding/defval +google.golang.org/protobuf/internal/encoding/json google.golang.org/protobuf/internal/encoding/messageset google.golang.org/protobuf/internal/encoding/tag google.golang.org/protobuf/internal/encoding/text @@ -2083,6 +2144,10 @@ k8s.io/component-base/metrics/prometheus/feature k8s.io/component-base/metrics/prometheusextension k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version +# k8s.io/cri-api v0.29.2 +## explicit; go 1.21 +k8s.io/cri-api/pkg/apis/runtime/v1 +k8s.io/cri-api/pkg/apis/testing # k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 ## explicit; go 1.13 k8s.io/gengo/args From 6be73bfc27c82d205bae5d4aa1b4f2c52d730a1e Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Sat, 13 Apr 2024 16:01:48 -0400 Subject: [PATCH 02/18] daemon: add CRI client Signed-off-by: Sam Batschelet --- pkg/daemon/cri/cri.go | 148 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 pkg/daemon/cri/cri.go diff --git a/pkg/daemon/cri/cri.go b/pkg/daemon/cri/cri.go new file mode 100644 index 0000000000..3298dda90a --- /dev/null +++ b/pkg/daemon/cri/cri.go @@ -0,0 +1,148 @@ +package cri + +import ( + "context" + "errors" + "fmt" + "net" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/backoff" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" + + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" +) + +const ( + // gRPC connection parameters taken from kubelet + maxMsgSize = 16 * 1024 * 1024 // 16MiB + maxBackoffDelay = 3 * time.Second + baseBackoffDelay = 100 * time.Millisecond + minConnectionTimeout = 5 * time.Second +) + +// NewClient creates a new container runtime image client. +func NewClient(ctx context.Context, target string) (*Client, error) { + conn, err := newClientConn(ctx, target) + if err != nil { + return nil, err + } + return &Client{ + conn: conn, + image: runtimeapi.NewImageServiceClient(conn), + }, nil +} + +type Client struct { + conn *grpc.ClientConn + image runtimeapi.ImageServiceClient +} + +func (c *Client) PullImage(ctx context.Context, image string, auth *runtimeapi.AuthConfig) error { + resp, err := c.image.PullImage(ctx, &runtimeapi.PullImageRequest{ + Image: &runtimeapi.ImageSpec{ + Image: image, + }, + Auth: auth, + }) + if err != nil { + statusErr, ok := status.FromError(err) + // If the gRPC error code is unknown don't return the full error just + // the actual message. + if ok && statusErr.Code() == codes.Unknown { + return errors.New(statusErr.Message()) + } + return err + } + if resp.ImageRef == "" { + return fmt.Errorf("imageRef for %s not set", image) + } + + return nil +} + +// ImageStatus returns true if the image exists in the container runtime. 
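A hedged sketch of how a caller might wire the client up; the socket path mirrors the DefaultCRIOSocketPath constant added later in this series, and the image reference is illustrative:

func prefetchOne(ctx context.Context) error {
	client, err := cri.NewClient(ctx, "/var/run/crio/crio.sock")
	if err != nil {
		return err
	}
	defer client.Close()

	// nil auth pulls anonymously; pass a *runtimeapi.AuthConfig for private registries.
	return client.PullImage(ctx, "quay.io/example/app:v1", nil)
}

The ImageStatus helper below provides the cheap existence probe used to skip pulls that are already satisfied.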
+func (c *Client) ImageStatus(ctx context.Context, image string) (bool, error) {
+	resp, err := c.image.ImageStatus(ctx, &runtimeapi.ImageStatusRequest{
+		Image: &runtimeapi.ImageSpec{Image: image},
+	})
+	if err != nil {
+		return false, fmt.Errorf("failed to get image status for %q: %w", image, err)
+	}
+
+	if resp.Image != nil {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+// RemoveImage removes the image from the container runtime.
+func (c *Client) RemoveImage(ctx context.Context, image string) error {
+	_, err := c.image.RemoveImage(ctx, &runtimeapi.RemoveImageRequest{
+		Image: &runtimeapi.ImageSpec{
+			Image: image,
+		},
+	})
+	return err
+}
+
+// ListImages returns a list of images in the container runtime.
+func (c *Client) ListImages(ctx context.Context, filter string) ([]*runtimeapi.Image, error) {
+	resp, err := c.image.ListImages(ctx, &runtimeapi.ListImagesRequest{
+		Filter: &runtimeapi.ImageFilter{
+			Image: &runtimeapi.ImageSpec{
+				Image: filter,
+			},
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	return resp.Images, nil
+}
+
+// ImageFsInfo returns information about the filesystem that is used to store images.
+func (c *Client) ImageFsInfo(ctx context.Context) (*runtimeapi.ImageFsInfoResponse, error) {
+	return c.image.ImageFsInfo(ctx, &runtimeapi.ImageFsInfoRequest{})
+}
+
+// newClientConn creates a new gRPC client connection to the container runtime
+// service. It supports unix socket and tcp (address:port) targets.
+func newClientConn(ctx context.Context, target string) (conn *grpc.ClientConn, err error) {
+	bc := backoff.DefaultConfig
+	bc.BaseDelay = baseBackoffDelay
+	bc.MaxDelay = maxBackoffDelay
+	connParams := grpc.ConnectParams{
+		Backoff:           bc,
+		MinConnectTimeout: minConnectionTimeout,
+	}
+
+	network := "unix"
+	if _, _, err := net.SplitHostPort(target); err == nil {
+		// If the target has a valid address:port, assume tcp network.
+		network = "tcp"
+	}
+
+	dialerFn := func(ctx context.Context, target string) (net.Conn, error) {
+		return (&net.Dialer{}).DialContext(ctx, network, target)
+	}
+
+	var dialOpts []grpc.DialOption
+	dialOpts = append(dialOpts,
+		grpc.WithTransportCredentials(insecure.NewCredentials()),
+		grpc.WithDefaultCallOptions(
+			grpc.MaxCallRecvMsgSize(maxMsgSize),
+		),
+		grpc.WithConnectParams(connParams),
+		grpc.WithContextDialer(dialerFn),
+	)
+
+	return grpc.DialContext(ctx, target, dialOpts...)
+} + +func (c *Client) Close() error { + return c.conn.Close() +} From 380c200116272edcd9487c48b7353268f13bd5ed Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Sat, 13 Apr 2024 16:02:38 -0400 Subject: [PATCH 03/18] daemon: add pinned image set manager Signed-off-by: Sam Batschelet --- cmd/machine-config-daemon/start.go | 27 +- .../machineconfigdaemon/clusterrole.yaml | 3 + pkg/daemon/constants/constants.go | 10 + pkg/daemon/pinned_image_set.go | 1214 +++++++++++++++++ pkg/daemon/pinned_image_set_test.go | 622 +++++++++ pkg/upgrademonitor/upgrade_monitor.go | 83 +- 6 files changed, 1956 insertions(+), 3 deletions(-) create mode 100644 pkg/daemon/pinned_image_set.go create mode 100644 pkg/daemon/pinned_image_set_test.go diff --git a/cmd/machine-config-daemon/start.go b/cmd/machine-config-daemon/start.go index f02c354ee1..8bcf1ccf2f 100644 --- a/cmd/machine-config-daemon/start.go +++ b/cmd/machine-config-daemon/start.go @@ -8,11 +8,15 @@ import ( "os" "time" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/client-go/tools/clientcmd" + configv1 "github.com/openshift/api/config/v1" "github.com/openshift/machine-config-operator/internal/clients" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/pkg/daemon" + "github.com/openshift/machine-config-operator/pkg/daemon/constants" + "github.com/openshift/machine-config-operator/pkg/daemon/cri" "github.com/openshift/machine-config-operator/pkg/version" "github.com/spf13/cobra" "k8s.io/klog/v2" @@ -191,6 +195,25 @@ func runStartCmd(_ *cobra.Command, _ []string) { klog.Fatalf("Failed to initialize: %v", err) } + criClient, err := cri.NewClient(ctx, constants.DefaultCRIOSocketPath) + if err != nil { + klog.Fatalf("Failed to initialize CRI client: %v", err) + } + prefetchTimeout := 2 * time.Minute + pinnedImageSetManager := daemon.NewPinnedImageSetManager( + startOpts.nodeName, + criClient, + ctrlctx.ClientBuilder.MachineConfigClientOrDie(componentName), + ctrlctx.InformerFactory.Machineconfiguration().V1alpha1().PinnedImageSets(), + ctrlctx.KubeInformerFactory.Core().V1().Nodes(), + ctrlctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), + resource.MustParse(constants.MinFreeStorageAfterPrefetch), + constants.DefaultCRIOSocketPath, + constants.KubeletAuthFile, + prefetchTimeout, + ctrlctx.FeatureGateAccess, + ) + ctrlctx.ConfigInformerFactory.Start(ctrlctx.Stop) ctrlctx.KubeInformerFactory.Start(stopCh) ctrlctx.KubeNamespacedInformerFactory.Start(stopCh) @@ -205,6 +228,9 @@ func runStartCmd(_ *cobra.Command, _ []string) { klog.Fatalf("Could not get FG: %v", err) } else { klog.Infof("FeatureGates initialized: knownFeatureGates=%v", featureGates.KnownFeatures()) + if featureGates.Enabled(configv1.FeatureGatePinnedImages) { + go pinnedImageSetManager.Run(2, stopCh) + } } case <-time.After(1 * time.Minute): klog.Fatalf("Could not get FG, timed out: %v", err) @@ -221,5 +247,4 @@ func runStartCmd(_ *cobra.Command, _ []string) { os.Exit(255) } } - } diff --git a/manifests/machineconfigdaemon/clusterrole.yaml b/manifests/machineconfigdaemon/clusterrole.yaml index 26804ca2ce..0a174c0a2c 100644 --- a/manifests/machineconfigdaemon/clusterrole.yaml +++ b/manifests/machineconfigdaemon/clusterrole.yaml @@ -12,6 +12,9 @@ rules: - apiGroups: ["machineconfiguration.openshift.io"] resources: ["machineconfigs", "controllerconfigs"] verbs: ["get", "list", "watch"] +- apiGroups: ["machineconfiguration.openshift.io"] + resources: ["machineconfigpools", "pinnedimagesets"] + verbs: 
["get", "list", "watch"] - apiGroups: ["machineconfiguration.openshift.io"] resources: ["machineconfignodes", "machineconfignodes/status"] verbs: ["create", "update", "patch", "get"] diff --git a/pkg/daemon/constants/constants.go b/pkg/daemon/constants/constants.go index 9dca9faab2..4cda3bc7b5 100644 --- a/pkg/daemon/constants/constants.go +++ b/pkg/daemon/constants/constants.go @@ -111,4 +111,14 @@ const ( // DaemonReloadCommand is used to specify reloads and restarts of the systemd manager configuration DaemonReloadCommand = "daemon-reload" + + // DefaultCRIOSocketPath is the default path to the CRI-O socket + DefaultCRIOSocketPath = "/var/run/crio/crio.sock" + + // KubeletAuthFile is the path to the kubelet auth file. + KubeletAuthFile = "/var/lib/kubelet/config.json" + + // MinFreeStorageAfterPrefetch is the minimum amount of storage + // available on the root filesystem after prefetching images. + MinFreeStorageAfterPrefetch = "16Gi" ) diff --git a/pkg/daemon/pinned_image_set.go b/pkg/daemon/pinned_image_set.go new file mode 100644 index 0000000000..974e214fcc --- /dev/null +++ b/pkg/daemon/pinned_image_set.go @@ -0,0 +1,1214 @@ +package daemon + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "reflect" + "strings" + "sync" + "time" + + "github.com/BurntSushi/toml" + "github.com/containers/image/v5/docker/reference" + ign3types "github.com/coreos/ignition/v2/config/v3_4/types" + "github.com/golang/groupcache/lru" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + coreinformersv1 "k8s.io/client-go/informers/core/v1" + corev1lister "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + machineconfigurationalphav1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1alpha1" + mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" + mcfginformersv1alpha1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" + mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" + mcfglistersv1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + "github.com/openshift/machine-config-operator/pkg/daemon/constants" + "github.com/openshift/machine-config-operator/pkg/daemon/cri" + "github.com/openshift/machine-config-operator/pkg/helpers" + "github.com/openshift/machine-config-operator/pkg/upgrademonitor" +) + +const ( + // cri gRPC connection parameters taken from kubelet + criPrefetchInterval = 30 * time.Second + defaultPrefetchWorkers = 5 + defaultControlPlaneWorkers = 1 + defaultPrefetchThrottleDuration = 1 * 
time.Second + + crioPinnedImagesDropInFilePath = "/etc/crio/crio.conf.d/50-pinned-images" + + // backoff configuration + maxRetries = 5 + retryDuration = 1 * time.Second + retryFactor = 2.0 + retryCap = 10 * time.Second + + // controller configuration + maxRetriesController = 15 +) + +var ( + errInsufficientStorage = errors.New("storage available is less than minimum required") + errFailedToPullImage = errors.New("failed to pull image") + errNotFound = errors.New("not found") + errNoAuthForImage = errors.New("no auth found for image") +) + +// PinnedImageSetManager manages the prefetching of images. +type PinnedImageSetManager struct { + // nodeName is the name of the node. + nodeName string + + imageSetLister mcfglistersv1alpha1.PinnedImageSetLister + imageSetSynced cache.InformerSynced + + nodeLister corev1lister.NodeLister + nodeListerSynced cache.InformerSynced + + mcpLister mcfglistersv1.MachineConfigPoolLister + mcpSynced cache.InformerSynced + + mcfgClient mcfgclientset.Interface + + prefetchCh chan prefetch + + criClient *cri.Client + + // minimum storage available after prefetching + minStorageAvailableBytes resource.Quantity + // path to the authfile + authFilePath string + // endpoint of the container runtime service + runtimeEndpoint string + // timeout for the prefetch operation + prefetchTimeout time.Duration + // backoff configuration for retries. + backoff wait.Backoff + + // cache for reusable image information + cache *lru.Cache + + syncHandler func(string) error + enqueueMachineConfigPool func(*mcfgv1.MachineConfigPool) + queue workqueue.RateLimitingInterface + featureGatesAccessor featuregates.FeatureGateAccess + + // mutex to protect the cancelFn + mu sync.Mutex + cancelFn context.CancelFunc +} + +// NewPinnedImageSetManager creates a new pinned image set manager. 
+func NewPinnedImageSetManager( + nodeName string, + criClient *cri.Client, + mcfgClient mcfgclientset.Interface, + imageSetInformer mcfginformersv1alpha1.PinnedImageSetInformer, + nodeInformer coreinformersv1.NodeInformer, + mcpInformer mcfginformersv1.MachineConfigPoolInformer, + minStorageAvailableBytes resource.Quantity, + runtimeEndpoint string, + authFilePath string, + prefetchTimeout time.Duration, + featureGatesAccessor featuregates.FeatureGateAccess, +) *PinnedImageSetManager { + p := &PinnedImageSetManager{ + nodeName: nodeName, + mcfgClient: mcfgClient, + runtimeEndpoint: runtimeEndpoint, + authFilePath: authFilePath, + prefetchTimeout: prefetchTimeout, + minStorageAvailableBytes: minStorageAvailableBytes, + featureGatesAccessor: featureGatesAccessor, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pinned-image-set-manager"), + prefetchCh: make(chan prefetch, defaultPrefetchWorkers*2), + criClient: criClient, + backoff: wait.Backoff{ + Steps: maxRetries, + Duration: retryDuration, + Factor: retryFactor, + Cap: retryCap, + }, + } + + imageSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: p.addPinnedImageSet, + UpdateFunc: p.updatePinnedImageSet, + DeleteFunc: p.deletePinnedImageSet, + }) + + mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: p.addMachineConfigPool, + UpdateFunc: p.updateMachineConfigPool, + DeleteFunc: p.deleteMachineConfigPool, + }) + + p.syncHandler = p.sync + p.enqueueMachineConfigPool = p.enqueue + + p.imageSetLister = imageSetInformer.Lister() + p.imageSetSynced = imageSetInformer.Informer().HasSynced + + p.nodeLister = nodeInformer.Lister() + p.nodeListerSynced = nodeInformer.Informer().HasSynced + + p.mcpLister = mcpInformer.Lister() + p.mcpSynced = mcpInformer.Informer().HasSynced + + p.cache = lru.New(256) + + return p +} + +type imageInfo struct { + Name string + Size int64 +} + +func (p *PinnedImageSetManager) sync(key string) error { + node, err := p.nodeLister.Get(p.nodeName) + if err != nil { + return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) + } + + pools, _, err := helpers.GetPoolsForNode(p.mcpLister, node) + if err != nil { + return err + } + + if len(pools) == 0 { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), p.prefetchTimeout) + // cancel any currently running tasks in the worker pool + p.resetWorkload(cancel) + + if err := p.syncMachineConfigPools(ctx, pools); err != nil { + if errors.Is(err, context.DeadlineExceeded) { + ctxErr := fmt.Errorf("requeue: prefetching images incomplete after: %v", p.prefetchTimeout) + p.updateStatusError(pools, ctxErr) + klog.Error(ctxErr) + return ctxErr + } + p.updateStatusError(pools, err) + return err + } + + p.updateStatusProgressingComplete(pools, "All pinned image sets complete") + return nil +} + +func (p *PinnedImageSetManager) syncMachineConfigPools(ctx context.Context, pools []*mcfgv1.MachineConfigPool) error { + images := make([]mcfgv1alpha1.PinnedImageRef, 0, 100) + for _, pool := range pools { + validTarget, err := validateTargetPoolForNode(p.nodeName, p.nodeLister, pool) + if err != nil { + klog.Errorf("failed to validate pool %q: %v", pool.Name, err) + return err + } + if !validTarget { + continue + } + + if err := p.syncMachineConfigPool(ctx, pool); err != nil { + return err + } + + // collect all unique images from all pools + for _, image := range pool.Spec.PinnedImageSets { + imageSet, err := p.imageSetLister.Get(image.Name) + if err != nil { + 
if apierrors.IsNotFound(err) { + klog.Warningf("PinnedImageSet %q not found", image.Name) + continue + } + return fmt.Errorf("failed to get PinnedImageSet %q: %w", image.Name, err) + } + images = append(images, imageSet.Spec.PinnedImages...) + } + } + + // write config and reload crio last to allow a window for kubelet to gc images in emergency + if err := p.ensureCrioPinnedImagesConfigFile(images); err != nil { + klog.Errorf("failed to write crio config file: %v", err) + return err + } + + return nil +} + +// validateTargetPoolForNode checks if the node is a target for the given pool. +func validateTargetPoolForNode(nodeName string, nodeLister corev1lister.NodeLister, pool *mcfgv1.MachineConfigPool) (bool, error) { + if pool.Spec.PinnedImageSets == nil { + return false, nil + } + + selector, err := metav1.LabelSelectorAsSelector(pool.Spec.NodeSelector) + if err != nil { + return false, fmt.Errorf("invalid label selector: %w", err) + } + + nodes, err := nodeLister.List(selector) + if err != nil { + return false, err + } + + if !isNodeInPool(nodes, nodeName) { + return false, nil + } + + return true, nil +} + +func (p *PinnedImageSetManager) resetWorkload(cancelFn context.CancelFunc) { + p.mu.Lock() + if p.cancelFn != nil { + p.cancelFn() + } + p.cancelFn = cancelFn + p.mu.Unlock() +} + +func (p *PinnedImageSetManager) syncMachineConfigPool(ctx context.Context, pool *mcfgv1.MachineConfigPool) error { + if pool.Spec.PinnedImageSets == nil { + return nil + } + imageSets := make([]*mcfgv1alpha1.PinnedImageSet, 0, len(pool.Spec.PinnedImageSets)) + for _, ref := range pool.Spec.PinnedImageSets { + imageSet, err := p.imageSetLister.Get(ref.Name) + if err != nil { + return fmt.Errorf("failed to get PinnedImageSet %q: %w", ref.Name, err) + } + klog.Infof("Reconciling pinned image set: %s: generation: %d", ref.Name, imageSet.GetGeneration()) + + // verify storage per image set + if err := p.checkNodeAllocatableStorage(ctx, imageSet); err != nil { + return err + } + imageSets = append(imageSets, imageSet) + } + + return p.prefetchImageSets(ctx, imageSets...) +} + +// prefetchMonitor is used to monitor the status of prefetch operations. +type prefetchMonitor struct { + once sync.Once + errFn func(error) + err error + wg sync.WaitGroup +} + +func newPrefetchMonitor() *prefetchMonitor { + m := &prefetchMonitor{} + m.errFn = func(err error) { + m.once.Do(func() { + m.err = err + }) + } + return m +} + +// Add increments the number of prefetch operations the monitor is waiting for. +func (m *prefetchMonitor) Add(i int) { + m.wg.Add(i) +} + +func (m *prefetchMonitor) finalize(err error) { + m.once.Do(func() { + m.err = err + }) +} + +// Error is called when an error occurs during prefetching. +func (m *prefetchMonitor) Error(err error) { + m.finalize(err) +} + +// Done decrements the number of prefetch operations the monitor is waiting for. +func (m *prefetchMonitor) Done() { + m.wg.Done() +} + +// WaitForDone waits for the prefetch operations to complete and returns the first error encountered. +func (m *prefetchMonitor) WaitForDone() error { + m.wg.Wait() + return m.err +} + +// prefetchImageSets schedules the prefetching of images for the given image sets and waits for completion. 
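The monitor is essentially a WaitGroup with first-error capture: Error may fire from many workers, but sync.Once keeps only the earliest failure. A generic sketch of that contract (doWork is a stand-in, not part of this patch), ahead of prefetchImageSets below:

func prefetchAll(tasks []string) error {
	m := newPrefetchMonitor()
	for _, task := range tasks {
		m.Add(1)
		go func(t string) {
			defer m.Done()
			if err := doWork(t); err != nil {
				m.Error(err) // sync.Once retains only the first error
			}
		}(task)
	}
	// Blocks until every Done has run, then surfaces the earliest failure.
	return m.WaitForDone()
}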
+func (p *PinnedImageSetManager) prefetchImageSets(ctx context.Context, imageSets ...*mcfgv1alpha1.PinnedImageSet) error { + registryAuth, err := newRegistryAuth(p.authFilePath) + if err != nil { + return err + } + + // monitor prefetch operations + monitor := newPrefetchMonitor() + for _, imageSet := range imageSets { + if err := scheduleWork(ctx, p.prefetchCh, registryAuth, imageSet.Spec.PinnedImages, monitor); err != nil { + return err + } + } + + return monitor.WaitForDone() +} + +func isNodeInPool(nodes []*corev1.Node, nodeName string) bool { + for _, n := range nodes { + if n.Name == nodeName { + return true + } + } + return false +} + +func (p *PinnedImageSetManager) checkNodeAllocatableStorage(ctx context.Context, imageSet *mcfgv1alpha1.PinnedImageSet) error { + node, err := p.nodeLister.Get(p.nodeName) + if err != nil { + return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) + } + + storageCapacity, ok := node.Status.Capacity[corev1.ResourceEphemeralStorage] + if !ok { + return fmt.Errorf("node %q has no ephemeral storage capacity", p.nodeName) + } + + capacity := storageCapacity.Value() + if storageCapacity.Cmp(p.minStorageAvailableBytes) < 0 { + return fmt.Errorf("%w capacity: %d, required: %d", errInsufficientStorage, capacity, p.minStorageAvailableBytes.Value()) + } + + if err := p.checkImagePayloadStorage(ctx, imageSet.Spec.PinnedImages, capacity); err != nil { + return err + } + + return nil +} + +func (p *PinnedImageSetManager) checkImagePayloadStorage(ctx context.Context, images []mcfgv1alpha1.PinnedImageRef, capacity int64) error { + // calculate total required storage for all images. + requiredStorage := int64(0) + for _, image := range images { + imageName := image.Name + exists, err := p.criClient.ImageStatus(ctx, imageName) + if err != nil { + return err + } + if exists { + // dont account for storage if the image already exists + continue + } + + // check if the image is already in the cache before fetching the size + if value, found := p.cache.Get(imageName); found { + imageInfo, ok := value.(imageInfo) + if ok { + requiredStorage += imageInfo.Size * 2 + continue + } + // cache is corrupted, delete the key + klog.Warningf("corrupted cache entry for image %q, deleting", imageName) + p.cache.Remove(imageName) + } + size, err := getImageSize(ctx, imageName, p.authFilePath) + if err != nil { + return err + } + + // cache miss + p.cache.Add(imageName, imageInfo{Name: imageName, Size: size}) + + // account for decompression + requiredStorage += size * 2 + } + + if requiredStorage >= capacity-p.minStorageAvailableBytes.Value() { + klog.Errorf("%v capacity=%d, required=%d", errInsufficientStorage, capacity, requiredStorage+p.minStorageAvailableBytes.Value()) + return errInsufficientStorage + } + + return nil +} + +func isImageSetInPool(imageSet string, pool *mcfgv1.MachineConfigPool) bool { + for _, set := range pool.Spec.PinnedImageSets { + if set.Name == imageSet { + return true + } + } + return false +} + +// ensureCrioPinnedImagesConfigFile creates the crio config file which populates pinned_images with a unique list of images then reloads crio. +// If the list is empty, and a config exists on disk the file is removed and crio is reloaded. 
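For reference, the drop-in this helper renders at /etc/crio/crio.conf.d/50-pinned-images would take roughly this shape; the digest is a placeholder and the exact TOML layout depends on the marshaller:

[crio]
  [crio.image]
    pinned_images = [
      "quay.io/openshift-release-dev/ocp-release@sha256:<digest>",
    ]

CRI-O treats images listed in pinned_images as exempt from kubelet image garbage collection, which is why the file is written, and CRI-O reloaded, only after prefetching succeeds.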
+func (p *PinnedImageSetManager) ensureCrioPinnedImagesConfigFile(images []mcfgv1alpha1.PinnedImageRef) error { + hasCrioConfigFile, err := p.hasPinnedImagesConfigFile() + if err != nil { + return fmt.Errorf("failed to check crio config file: %w", err) + } + + // if there are no images and the config file exists, remove it + if len(images) == 0 { + if hasCrioConfigFile { + // remove the crio config file and reload crio + if err := p.deleteCrioConfigFile(); err != nil { + return fmt.Errorf("failed to remove crio config file: %w", err) + } + return p.crioReload() + } + return nil + } + + // ensure list elements are unique + // assume the number of duplicates is small so preallocate makes sense + // for larger lists + seen := make(map[string]struct{}, len(images)) + unique := make([]string, 0, len(images)) + for _, image := range images { + if _, ok := seen[image.Name]; !ok { + seen[image.Name] = struct{}{} + unique = append(unique, image.Name) + } + } + + // create ignition file for crio drop-in config. this is done to use the existing + // atomic writer functionality. + ignFile, err := createCrioConfigIgnitionFile(unique) + if err != nil { + return fmt.Errorf("failed to create crio config ignition file: %w", err) + } + + skipCertificateWrite := true + if err := writeFiles([]ign3types.File{ignFile}, skipCertificateWrite); err != nil { + return fmt.Errorf("failed to write crio config file: %w", err) + } + return p.crioReload() +} + +func (p *PinnedImageSetManager) updateStatusProgressingComplete(pools []*mcfgv1.MachineConfigPool, message string) { + node, err := p.nodeLister.Get(p.nodeName) + if err != nil { + klog.Errorf("Failed to get node %q: %v", p.nodeName, err) + return + } + + applyCfg, imageSets, err := getImageSetApplyConfigs(p.imageSetLister, pools, nil) + if err != nil { + klog.Errorf("Failed to get image set apply configs: %v", err) + return + } + + err = upgrademonitor.UpdateMachineConfigNodeStatus( + &upgrademonitor.Condition{ + State: mcfgv1alpha1.MachineConfigNodePinnedImageSetsProgressing, + Reason: "AsExpected", + Message: message, + }, + nil, + metav1.ConditionFalse, + metav1.ConditionUnknown, + node, + p.mcfgClient, + applyCfg, + imageSets, + p.featureGatesAccessor, + ) + if err != nil { + klog.Errorf("Failed to updated machine config node: %v", err) + } + + // reset any degraded status + err = upgrademonitor.UpdateMachineConfigNodeStatus( + &upgrademonitor.Condition{ + State: mcfgv1alpha1.MachineConfigNodePinnedImageSetsDegraded, + Reason: "AsExpected", + Message: "All is good", + }, + nil, + metav1.ConditionFalse, + metav1.ConditionUnknown, + node, + p.mcfgClient, + applyCfg, + imageSets, + p.featureGatesAccessor, + ) + if err != nil { + klog.Errorf("Failed to updated machine config node: %v", err) + } +} + +func (p *PinnedImageSetManager) updateStatusError(pools []*mcfgv1.MachineConfigPool, statusErr error) { + node, err := p.nodeLister.Get(p.nodeName) + if err != nil { + klog.Errorf("Failed to get node %q: %v", p.nodeName, err) + return + } + + applyCfg, imageSets, err := getImageSetApplyConfigs(p.imageSetLister, pools, nil) + if err != nil { + klog.Errorf("Failed to get image set apply configs: %v", err) + return + } + + err = upgrademonitor.UpdateMachineConfigNodeStatus( + &upgrademonitor.Condition{ + State: mcfgv1alpha1.MachineConfigNodePinnedImageSetsProgressing, + Reason: "ImagePrefetch", + Message: fmt.Sprintf("node is prefetching images: %s", node.Name), + }, + nil, + metav1.ConditionTrue, + metav1.ConditionUnknown, + node, + p.mcfgClient, + applyCfg, + 
+func (p *PinnedImageSetManager) updateStatusProgressingComplete(pools []*mcfgv1.MachineConfigPool, message string) {
+	node, err := p.nodeLister.Get(p.nodeName)
+	if err != nil {
+		klog.Errorf("Failed to get node %q: %v", p.nodeName, err)
+		return
+	}
+
+	applyCfg, imageSets, err := getImageSetApplyConfigs(p.imageSetLister, pools, nil)
+	if err != nil {
+		klog.Errorf("Failed to get image set apply configs: %v", err)
+		return
+	}
+
+	err = upgrademonitor.UpdateMachineConfigNodeStatus(
+		&upgrademonitor.Condition{
+			State:   mcfgv1alpha1.MachineConfigNodePinnedImageSetsProgressing,
+			Reason:  "AsExpected",
+			Message: message,
+		},
+		nil,
+		metav1.ConditionFalse,
+		metav1.ConditionUnknown,
+		node,
+		p.mcfgClient,
+		applyCfg,
+		imageSets,
+		p.featureGatesAccessor,
+	)
+	if err != nil {
+		klog.Errorf("Failed to update machine config node: %v", err)
+	}
+
+	// reset any degraded status
+	err = upgrademonitor.UpdateMachineConfigNodeStatus(
+		&upgrademonitor.Condition{
+			State:   mcfgv1alpha1.MachineConfigNodePinnedImageSetsDegraded,
+			Reason:  "AsExpected",
+			Message: "All is good",
+		},
+		nil,
+		metav1.ConditionFalse,
+		metav1.ConditionUnknown,
+		node,
+		p.mcfgClient,
+		applyCfg,
+		imageSets,
+		p.featureGatesAccessor,
+	)
+	if err != nil {
+		klog.Errorf("Failed to update machine config node: %v", err)
+	}
+}
+
+func (p *PinnedImageSetManager) updateStatusError(pools []*mcfgv1.MachineConfigPool, statusErr error) {
+	node, err := p.nodeLister.Get(p.nodeName)
+	if err != nil {
+		klog.Errorf("Failed to get node %q: %v", p.nodeName, err)
+		return
+	}
+
+	applyCfg, imageSets, err := getImageSetApplyConfigs(p.imageSetLister, pools, nil)
+	if err != nil {
+		klog.Errorf("Failed to get image set apply configs: %v", err)
+		return
+	}
+
+	err = upgrademonitor.UpdateMachineConfigNodeStatus(
+		&upgrademonitor.Condition{
+			State:   mcfgv1alpha1.MachineConfigNodePinnedImageSetsProgressing,
+			Reason:  "ImagePrefetch",
+			Message: fmt.Sprintf("node is prefetching images: %s", node.Name),
+		},
+		nil,
+		metav1.ConditionTrue,
+		metav1.ConditionUnknown,
+		node,
+		p.mcfgClient,
+		applyCfg,
+		imageSets,
+		p.featureGatesAccessor,
+	)
+	if err != nil {
+		klog.Errorf("Failed to update machine config node: %v", err)
+	}
+
+	applyCfg, imageSets, err = getImageSetApplyConfigs(p.imageSetLister, pools, statusErr)
+	if err != nil {
+		klog.Errorf("Failed to get image set apply configs: %v", err)
+		return
+	}
+
+	err = upgrademonitor.UpdateMachineConfigNodeStatus(
+		&upgrademonitor.Condition{
+			State:   mcfgv1alpha1.MachineConfigNodePinnedImageSetsDegraded,
+			Reason:  "PrefetchFailed",
+			Message: "Error: " + statusErr.Error(),
+		},
+		nil,
+		metav1.ConditionTrue,
+		metav1.ConditionUnknown,
+		node,
+		p.mcfgClient,
+		applyCfg,
+		imageSets,
+		p.featureGatesAccessor,
+	)
+	if err != nil {
+		klog.Errorf("Failed to update machine config node: %v", err)
+	}
+}
+
+// getImageSetApplyConfigs populates image set generation details for status updates to MachineConfigNode.
+func getImageSetApplyConfigs(imageSetLister mcfglistersv1alpha1.PinnedImageSetLister, pools []*mcfgv1.MachineConfigPool, statusErr error) ([]*machineconfigurationalphav1.MachineConfigNodeStatusPinnedImageSetApplyConfiguration, []mcfgv1alpha1.MachineConfigNodeSpecPinnedImageSet, error) {
+	var mcnImageSets []mcfgv1alpha1.MachineConfigNodeSpecPinnedImageSet
+	var imageSetApplyConfigs []*machineconfigurationalphav1.MachineConfigNodeStatusPinnedImageSetApplyConfiguration
+	for _, pool := range pools {
+		for _, imageSetRef := range pool.Spec.PinnedImageSets {
+			mcnImageSets = append(mcnImageSets, mcfgv1alpha1.MachineConfigNodeSpecPinnedImageSet{
+				Name: imageSetRef.Name,
+			})
+			imageSet, err := imageSetLister.Get(imageSetRef.Name)
+			if err != nil {
+				klog.Errorf("Error getting pinned image set: %v", err)
+				if apierrors.IsNotFound(err) {
+					continue
+				}
+				return nil, nil, err
+			}
+
+			imageSetConfig := machineconfigurationalphav1.MachineConfigNodeStatusPinnedImageSet().
+				WithName(imageSet.Name).
+				WithDesiredGeneration(int32(imageSet.GetGeneration()))
+			if statusErr != nil {
+				imageSetConfig.LastFailedGeneration = ptr.To(int32(imageSet.GetGeneration()))
+				imageSetConfig.LastFailedGenerationErrors = []string{statusErr.Error()}
+			} else {
+				imageSetConfig.CurrentGeneration = ptr.To(int32(imageSet.GetGeneration()))
+			}
+
+			imageSetApplyConfigs = append(imageSetApplyConfigs, imageSetConfig)
+		}
+	}
+	return imageSetApplyConfigs, mcnImageSets, nil
+}
+
+// getWorkerCount returns the number of workers to use for prefetching images.
+func (p *PinnedImageSetManager) getWorkerCount() (int, error) {
+	node, err := p.nodeLister.Get(p.nodeName)
+	if err != nil {
+		return 0, fmt.Errorf("failed to get node %q: %w", p.nodeName, err)
+	}
+
+	// start from the default prefetch worker count
+	workerCount := defaultPrefetchWorkers
+
+	// control plane nodes are a special case and use a reduced worker count to
+	// mitigate I/O contention with the control plane.
+	if _, ok := node.ObjectMeta.Labels["node-role.kubernetes.io/master"]; ok {
+		workerCount = defaultControlPlaneWorkers
+	}
+
+	return workerCount, nil
+}
+
+// prefetch represents a task to prefetch an image.
+type prefetch struct {
+	image   string
+	auth    *runtimeapi.AuthConfig
+	monitor *prefetchMonitor
+}
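prefetchMonitor itself is defined elsewhere in this series; judging from the calls used here (Add, Error, Done, WaitForDone), it behaves like a WaitGroup that also records the first error seen. A minimal sketch consistent with that usage, assuming only the standard sync package, and not the actual implementation:

    type prefetchMonitor struct {
        wg   sync.WaitGroup
        once sync.Once
        err  error
    }

    func newPrefetchMonitor() *prefetchMonitor { return &prefetchMonitor{} }

    // Add registers n in-flight prefetch tasks.
    func (m *prefetchMonitor) Add(n int) { m.wg.Add(n) }

    // Error records the first error reported by a worker.
    func (m *prefetchMonitor) Error(err error) { m.once.Do(func() { m.err = err }) }

    // Done marks one task complete.
    func (m *prefetchMonitor) Done() { m.wg.Done() }

    // WaitForDone blocks until all scheduled tasks finish and returns the
    // first recorded error, if any.
    func (m *prefetchMonitor) WaitForDone() error {
        m.wg.Wait()
        return m.err
    }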
+// prefetchWorker is a worker that pulls images from the container runtime.
+func (p *PinnedImageSetManager) prefetchWorker(ctx context.Context) {
+	for task := range p.prefetchCh {
+		if err := ensurePullImage(ctx, p.criClient, p.backoff, task.image, task.auth); err != nil {
+			task.monitor.Error(err)
+			klog.Warningf("failed to prefetch image %q: %v", task.image, err)
+		}
+		task.monitor.Done()
+
+		// throttle prefetching to avoid overloading the file system
+		select {
+		case <-time.After(defaultPrefetchThrottleDuration):
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+// scheduleWork schedules the prefetch work for the images and collects the first error encountered.
+func scheduleWork(ctx context.Context, prefetchCh chan prefetch, registryAuth *registryAuth, prefetchImages []mcfgv1alpha1.PinnedImageRef, monitor *prefetchMonitor) error {
+	totalImages := len(prefetchImages)
+	updateIncrement := totalImages / 4
+	if updateIncrement == 0 {
+		updateIncrement = 1 // ensure there is at least one update if the image count is less than 4
+	}
+	scheduledImages := 0
+	for _, imageRef := range prefetchImages {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+			image := string(imageRef.Name)
+			authConfig, err := registryAuth.getAuthConfigForImage(image)
+			if err != nil {
+				return fmt.Errorf("failed to get auth config for image %s: %w", image, err)
+			}
+			monitor.Add(1)
+			prefetchCh <- prefetch{
+				image:   image,
+				auth:    authConfig,
+				monitor: monitor,
+			}
+
+			scheduledImages++
+			// report status every 25% of images scheduled
+			if scheduledImages%updateIncrement == 0 {
+				klog.Infof("Completed scheduling %d%% of images", (scheduledImages*100)/totalImages)
+			}
+		}
+	}
+	return nil
+}
+
+// ensurePullImage first checks if the image exists locally and then will attempt to pull
+// the image from the container runtime with a retry/backoff.
+func ensurePullImage(ctx context.Context, client *cri.Client, backoff wait.Backoff, image string, authConfig *runtimeapi.AuthConfig) error {
+	exists, err := client.ImageStatus(ctx, image)
+	if err != nil {
+		return err
+	}
+	if exists {
+		klog.V(4).Infof("image %q already exists", image)
+		return nil
+	}
+
+	var lastErr error
+	tries := 0
+	err = wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) {
+		tries++
+		err := client.PullImage(ctx, image, authConfig)
+		if err != nil {
+			lastErr = err
+			return false, nil
+		}
+		return true, nil
+	})
+	// err is non-nil only if the context was cancelled or the backoff limit was exceeded
+	if err != nil {
+		return fmt.Errorf("%w %q (%d tries): %w: %w", errFailedToPullImage, image, tries, err, lastErr)
+	}
+
+	// successful pull
+	klog.V(4).Infof("image %q pulled", image)
+	return nil
+}
+
+func (p *PinnedImageSetManager) crioReload() error {
+	serviceName := constants.CRIOServiceName
+	if err := reloadService(serviceName); err != nil {
+		return fmt.Errorf("could not apply update: reloading %s configuration failed. Error: %w", serviceName, err)
+	}
+	return nil
+}
+
+func (p *PinnedImageSetManager) deleteCrioConfigFile() error {
+	// remove the crio config file
+	if err := os.Remove(crioPinnedImagesDropInFilePath); err != nil {
+		return fmt.Errorf("failed to remove crio config file: %w", err)
+	}
+
+	return nil
+}
+
+func (p *PinnedImageSetManager) hasPinnedImagesConfigFile() (bool, error) {
+	_, err := os.Stat(crioPinnedImagesDropInFilePath)
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	if err != nil {
+		return false, fmt.Errorf("failed to check crio config file: %w", err)
+	}
+
+	return true, nil
+}
+
+// createCrioConfigIgnitionFile creates an ignition file for the pinned-image crio config.
+func createCrioConfigIgnitionFile(images []string) (ign3types.File, error) {
+	type tomlConfig struct {
+		Crio struct {
+			Image struct {
+				PinnedImages []string `toml:"pinned_images,omitempty"`
+			} `toml:"image"`
+		} `toml:"crio"`
+	}
+
+	tomlConf := tomlConfig{}
+	tomlConf.Crio.Image.PinnedImages = images
+
+	// TODO: custom encoder that can write each image on newline
+	var buf bytes.Buffer
+	encoder := toml.NewEncoder(&buf)
+	if err := encoder.Encode(tomlConf); err != nil {
+		return ign3types.File{}, fmt.Errorf("failed to encode toml: %w", err)
+	}
+
+	return ctrlcommon.NewIgnFileBytes(crioPinnedImagesDropInFilePath, buf.Bytes()), nil
+}
+
+func (p *PinnedImageSetManager) Run(workers int, stopCh <-chan struct{}) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		utilruntime.HandleCrash()
+		p.queue.ShutDown()
+		close(p.prefetchCh)
+	}()
+
+	if !cache.WaitForCacheSync(
+		stopCh,
+		p.imageSetSynced,
+		p.nodeListerSynced,
+		p.mcpSynced,
+	) {
+		klog.Errorf("failed to sync initial lister caches")
+		return
+	}
+
+	workerCount, err := p.getWorkerCount()
+	if err != nil {
+		klog.Fatalf("failed to get worker count: %v", err)
+		return
+	}
+
+	klog.Infof("Starting PinnedImageSet Manager")
+	defer klog.Infof("Shutting down PinnedImageSet Manager")
+
+	// start image prefetch workers
+	for i := 0; i < workerCount; i++ {
+		go p.prefetchWorker(ctx)
+	}
+
+	for i := 0; i < workers; i++ {
+		go wait.Until(p.worker, time.Second, stopCh)
+	}
+
+	<-stopCh
+}
+
+func (p *PinnedImageSetManager) addPinnedImageSet(obj interface{}) {
+	imageSet := obj.(*mcfgv1alpha1.PinnedImageSet)
+	if imageSet.DeletionTimestamp != nil {
+		p.deletePinnedImageSet(imageSet)
+		return
+	}
+
+	node, err := p.nodeLister.Get(p.nodeName)
+	if err != nil {
+		klog.Errorf("failed to get node %q: %v", p.nodeName, err)
+		return
+	}
+
+	pools, _, err := helpers.GetPoolsForNode(p.mcpLister, node)
+	if err != nil {
+		klog.Errorf("error finding pools for node %s: %v", node.Name, err)
+		return
+	}
+	if pools == nil {
+		return
+	}
+
+	klog.V(4).Infof("PinnedImageSet %s added", imageSet.Name)
+	for _, pool := range pools {
+		if !isImageSetInPool(imageSet.Name, pool) {
+			continue
+		}
+		p.enqueueMachineConfigPool(pool)
+	}
+}
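As a worked example of the retry behavior in ensurePullImage above: with a hypothetical wait.Backoff{Steps: 4, Duration: 100 * time.Millisecond, Factor: 2}, a failing pull is attempted immediately, then again after roughly 100ms, 200ms, and 400ms (modulo jitter). Only after the fourth failure does ExponentialBackoffWithContext give up, at which point errFailedToPullImage is returned wrapping both the backoff error and the last pull error.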
+func (p *PinnedImageSetManager) deletePinnedImageSet(obj interface{}) {
+	imageSet, ok := obj.(*mcfgv1alpha1.PinnedImageSet)
+	if !ok {
+		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
+			return
+		}
+		imageSet, ok = tombstone.Obj.(*mcfgv1alpha1.PinnedImageSet)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a PinnedImageSet %#v", obj))
+			return
+		}
+	}
+
+	node, err := p.nodeLister.Get(p.nodeName)
+	if err != nil {
+		klog.Errorf("failed to get node %q: %v", p.nodeName, err)
+		return
+	}
+
+	pools, _, err := helpers.GetPoolsForNode(p.mcpLister, node)
+	if err != nil {
+		klog.Errorf("error finding pools for node %s: %v", node.Name, err)
+		return
+	}
+	if pools == nil {
+		return
+	}
+
+	klog.V(4).Infof("PinnedImageSet %s deleted", imageSet.Name)
+	for _, pool := range pools {
+		if !isImageSetInPool(imageSet.Name, pool) {
+			continue
+		}
+		p.enqueueMachineConfigPool(pool)
+	}
+}
+
+func (p *PinnedImageSetManager) updatePinnedImageSet(oldObj, newObj interface{}) {
+	oldImageSet := oldObj.(*mcfgv1alpha1.PinnedImageSet)
+	newImageSet := newObj.(*mcfgv1alpha1.PinnedImageSet)
+
+	imageSet, err := p.imageSetLister.Get(newImageSet.Name)
+	if err != nil {
+		// NotFound is expected when the image set was deleted; anything else is logged.
+		if !apierrors.IsNotFound(err) {
+			klog.Errorf("failed to get pinned image set %q: %v", newImageSet.Name, err)
+		}
+		return
+	}
+
+	node, err := p.nodeLister.Get(p.nodeName)
+	if err != nil {
+		klog.Errorf("failed to get node %q: %v", p.nodeName, err)
+		return
+	}
+
+	pools, _, err := helpers.GetPoolsForNode(p.mcpLister, node)
+	if err != nil {
+		klog.Errorf("error finding pools for node %s: %v", node.Name, err)
+		return
+	}
+	if pools == nil {
+		return
+	}
+
+	if triggerPinnedImageSetChange(oldImageSet, newImageSet) {
+		klog.V(4).Infof("PinnedImageSet %s updated", imageSet.Name)
+		for _, pool := range pools {
+			if !isImageSetInPool(imageSet.Name, pool) {
+				continue
+			}
+			p.enqueueMachineConfigPool(pool)
+		}
+	}
+}
+
+func (p *PinnedImageSetManager) addMachineConfigPool(obj interface{}) {
+	pool := obj.(*mcfgv1.MachineConfigPool)
+	if pool.DeletionTimestamp != nil {
+		p.deleteMachineConfigPool(pool)
+		return
+	}
+	selector, err := metav1.LabelSelectorAsSelector(pool.Spec.NodeSelector)
+	if err != nil {
+		klog.Errorf("invalid label selector: %v", err)
+		return
+	}
+
+	nodes, err := p.nodeLister.List(selector)
+	if err != nil {
+		klog.Errorf("failed to list nodes: %v", err)
+		return
+	}
+	if !isNodeInPool(nodes, p.nodeName) {
+		return
+	}
+
+	if pool.Spec.PinnedImageSets == nil {
+		return
+	}
+
+	klog.V(4).Infof("MachineConfigPool %s added", pool.Name)
+	p.enqueueMachineConfigPool(pool)
+}
+
+func (p *PinnedImageSetManager) updateMachineConfigPool(oldObj, newObj interface{}) {
+	oldPool := oldObj.(*mcfgv1.MachineConfigPool)
+	newPool := newObj.(*mcfgv1.MachineConfigPool)
+
+	selector, err := metav1.LabelSelectorAsSelector(newPool.Spec.NodeSelector)
+	if err != nil {
+		klog.Errorf("invalid label selector: %v", err)
+		return
+	}
+
+	nodes, err := p.nodeLister.List(selector)
+	if err != nil {
+		klog.Errorf("failed to list nodes: %v", err)
+		return
+	}
+	if !isNodeInPool(nodes, p.nodeName) {
+		return
+	}
+
+	if triggerMachineConfigPoolChange(oldPool, newPool) {
+		p.enqueueMachineConfigPool(newPool)
+	}
+}
+
+func (p *PinnedImageSetManager) deleteMachineConfigPool(obj interface{}) {
+	pool, ok := obj.(*mcfgv1.MachineConfigPool)
+	if !ok {
+		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
+			return
+		}
+		pool, ok = tombstone.Obj.(*mcfgv1.MachineConfigPool)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a MachineConfigPool %#v", obj))
+			return
+		}
+	}
+
+	selector, err := metav1.LabelSelectorAsSelector(pool.Spec.NodeSelector)
+	if err != nil {
+		klog.Errorf("invalid label selector: %v", err)
+		return
+	}
+
+	nodes, err := p.nodeLister.List(selector)
+	if err != nil {
+		klog.Errorf("failed to list nodes: %v", err)
+		return
+	}
+	if !isNodeInPool(nodes, p.nodeName) {
+		return
+	}
+
+	if err := p.deleteCrioConfigFile(); err != nil {
+		klog.Errorf("failed to delete crio config file: %v", err)
+		return
+	}
+
+	if err := p.crioReload(); err != nil {
+		klog.Errorf("failed to reload crio: %v", err)
+	}
+}
+
+func (p *PinnedImageSetManager) enqueue(pool *mcfgv1.MachineConfigPool) {
+	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %w", pool, err))
+		return
+	}
+	p.queue.Add(key)
+}
+
+// worker runs a worker thread that just dequeues items, processes them, and marks them done.
+// It enforces that the syncHandler is never invoked concurrently with the same key.
+func (p *PinnedImageSetManager) worker() {
+	for p.processNextItem() {
+	}
+}
+
+func (p *PinnedImageSetManager) processNextItem() bool {
+	key, quit := p.queue.Get()
+	if quit {
+		return false
+	}
+	defer p.queue.Done(key)
+
+	err := p.syncHandler(key.(string))
+	p.handleErr(err, key)
+	return true
+}
+
+func (p *PinnedImageSetManager) handleErr(err error, key interface{}) {
+	if err == nil {
+		p.queue.Forget(key)
+		return
+	}
+
+	if p.queue.NumRequeues(key) < maxRetriesController {
+		klog.V(4).Infof("Requeue MachineConfigPool %v: %v", key, err)
+		p.queue.AddRateLimited(key)
+		return
+	}
+	utilruntime.HandleError(err)
+
+	klog.Warningf("sync of MachineConfigPool %v failed: max retries (%d) exceeded", key, maxRetriesController)
+	p.queue.Forget(key)
+	p.queue.AddAfter(key, 1*time.Minute)
+}
+
+func getImageSize(ctx context.Context, imageName string, authFilePath string) (int64, error) {
+	args := []string{
+		"manifest",
+		"--log-level", "error", // suppress warn log output
+		"inspect",
+		"--authfile", authFilePath,
+		imageName,
+	}
+
+	output, err := exec.CommandContext(ctx, "podman", args...).CombinedOutput()
+	// podman reports a missing manifest on stderr, which CombinedOutput captures
+	if err != nil && strings.Contains(string(output), "manifest unknown") {
+		return 0, errNotFound
+	}
+	if err != nil {
+		return 0, fmt.Errorf("failed to execute podman manifest inspect for %q: %w", imageName, err)
+	}
+
+	var manifest ocispec.Manifest
+	err = json.Unmarshal(output, &manifest)
+	if err != nil {
+		return 0, fmt.Errorf("failed to unmarshal manifest for %q: %w", imageName, err)
+	}
+
+	var totalSize int64
+	for _, layer := range manifest.Layers {
+		totalSize += int64(layer.Size)
+	}
+
+	return totalSize, nil
+}
+
+func triggerPinnedImageSetChange(old, new *mcfgv1alpha1.PinnedImageSet) bool {
+	if old.DeletionTimestamp != new.DeletionTimestamp {
+		return true
+	}
+	if !reflect.DeepEqual(old.Spec, new.Spec) {
+		return true
+	}
+	return false
+}
+
+func triggerMachineConfigPoolChange(old, new *mcfgv1.MachineConfigPool) bool {
+	if old.DeletionTimestamp != new.DeletionTimestamp {
+		return true
+	}
+	if !reflect.DeepEqual(old.Spec, new.Spec) {
+		return true
+	}
+	return false
+}
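To make the parsing below concrete: a pull-secret entry such as

    {"auths": {"quay.io": {"auth": "Zm9vOmJhcg=="}}}

base64-decodes to "foo:bar" and yields a map entry keyed by "quay.io" holding runtimeapi.AuthConfig{Username: "foo", Password: "bar"}; this is the same fixture the tests later in this patch assert against.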
+// registryAuth manages a map of registry to auth config.
+type registryAuth struct {
+	mu   sync.Mutex
+	auth map[string]*runtimeapi.AuthConfig
+}
+
+func newRegistryAuth(authfile string) (*registryAuth, error) {
+	data, err := os.ReadFile(authfile)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read authfile: %w", err)
+	}
+
+	type authEntry struct {
+		Auth  string `json:"auth"`
+		Email string `json:"email"`
+	}
+
+	type auths struct {
+		Auths map[string]authEntry `json:"auths"`
+	}
+
+	var a auths
+	err = json.Unmarshal(data, &a)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal pull secret: %w", err)
+	}
+
+	authMap := make(map[string]*runtimeapi.AuthConfig, len(a.Auths))
+	for registry, entry := range a.Auths {
+		// auth is base64 encoded and in the format username:password
+		decoded, err := base64.StdEncoding.DecodeString(entry.Auth)
+		if err != nil {
+			return nil, fmt.Errorf("failed to decode auth for registry %q: %w", registry, err)
+		}
+		parts := strings.SplitN(string(decoded), ":", 2)
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("invalid auth for registry %q", registry)
+		}
+		authMap[registry] = &runtimeapi.AuthConfig{
+			Username: parts[0],
+			Password: parts[1],
+		}
+	}
+
+	return &registryAuth{auth: authMap}, nil
+}
+
+func (r *registryAuth) getAuthConfigForImage(image string) (*runtimeapi.AuthConfig, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	parsed, err := reference.ParseNormalizedNamed(strings.TrimSpace(image))
+	if err != nil {
+		return nil, err
+	}
+
+	auth, ok := r.auth[reference.Domain(parsed)]
+	if !ok {
+		return nil, fmt.Errorf("%w: %q", errNoAuthForImage, image)
+	}
+	return auth, nil
+}
diff --git a/pkg/daemon/pinned_image_set_test.go b/pkg/daemon/pinned_image_set_test.go
new file mode 100644
index 0000000000..1beba13584
--- /dev/null
+++ b/pkg/daemon/pinned_image_set_test.go
@@ -0,0 +1,622 @@
+package daemon
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"net"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/golang/groupcache/lru"
+	mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
+	mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1"
+	fakemco "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake"
+	mcfginformers "github.com/openshift/client-go/machineconfiguration/informers/externalversions"
+	"github.com/openshift/machine-config-operator/pkg/daemon/cri"
+	"github.com/stretchr/testify/require"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes/fake"
+	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	v1 "k8s.io/cri-api/pkg/apis/runtime/v1"
+	apitest "k8s.io/cri-api/pkg/apis/testing"
+	"k8s.io/klog/v2"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestMain(m *testing.M) {
+	klog.InitFlags(nil)
+	flag.Set("logtostderr", "true")
+	flag.Set("v", "2")
+	flag.Parse()
+	os.Exit(m.Run())
+}
+
+const authFileContents = `{"auths":{
+	"quay.io": {"auth": "Zm9vOmJhcg=="},
+	"quay.io:443": {"auth": "Zm9vOmJheg=="}
+}}`
+
+func TestRegistryAuth(t *testing.T) {
+	require := require.New(t)
+	authFilePath := filepath.Join(t.TempDir(), "auth.json")
+	bogusFilePath := filepath.Join(t.TempDir(), "bogus.json")
+	err := os.WriteFile(authFilePath, []byte(authFileContents), 0644)
+	require.NoError(err)
+	tests := []struct {
+		name     string
+		authFile string
+		image    string
+		wantAuth v1.AuthConfig
+		wantErr
error + wantImageErr error + }{ + { + name: "empty auth file", + authFile: bogusFilePath, + wantErr: os.ErrNotExist, + }, + { + name: "valid auth file", + authFile: authFilePath, + image: "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6134ca5fbcf8e4007de2d319758dcd7b4e5ded97a00c78a7ff490c1f56579d49", + wantAuth: v1.AuthConfig{ + Username: "foo", + Password: "bar", + }, + }, + { + name: "valid auth file with port", + authFile: authFilePath, + image: "quay.io:443/openshift-release-dev/ocp-v4.0-art-dev@sha256:6134ca5fbcf8e4007de2d319758dcd7b4e5ded97a00c78a7ff490c1f56579d49", + wantAuth: v1.AuthConfig{ + Username: "foo", + Password: "baz", + }, + }, + { + name: "no valid registry auth for image", + authFile: authFilePath, + image: "docker.io/no/auth", + wantImageErr: errNoAuthForImage, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + registry, err := newRegistryAuth(tt.authFile) + if tt.wantErr != nil { + require.ErrorIs(err, tt.wantErr) + return + } + require.NoError(err) + cfg, err := registry.getAuthConfigForImage(tt.image) + if tt.wantImageErr != nil { + require.ErrorIs(err, tt.wantImageErr) + return + } + require.NoError(err) + require.NotNil(cfg) + require.Equal(tt.wantAuth, *cfg) + }) + } +} + +func TestEnsurePinnedImageSetForNode(t *testing.T) { + require := require.New(t) + tests := []struct { + name string + nodes []runtime.Object + machineConfigPools []runtime.Object + wantValidTarget bool + wantErr error + wantNotFound bool + }{ + { + name: "happy path", + machineConfigPools: []runtime.Object{ + fakeWorkerPoolPinnedImageSets, + }, + nodes: []runtime.Object{ + fakeStableStorageWorkerNode, + }, + wantValidTarget: true, + }, + { + name: "pool has no pinned image sets", + machineConfigPools: []runtime.Object{ + fakeWorkerPoolNoPinnedImageSets, + }, + nodes: []runtime.Object{ + fakeStableStorageWorkerNode, + }, + wantValidTarget: true, + }, + { + name: "no matching pools for node", + machineConfigPools: []runtime.Object{ + fakeMasterPoolPinnedImageSets, + }, + nodes: []runtime.Object{ + fakeStableStorageWorkerNode, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + fakeMCOClient := fakemco.NewSimpleClientset(tt.machineConfigPools...) + fakeClient := fake.NewSimpleClientset(tt.nodes...) 
+ sharedInformers := mcfginformers.NewSharedInformerFactory(fakeMCOClient, noResyncPeriodFunc()) + informerFactory := informers.NewSharedInformerFactory(fakeClient, noResyncPeriodFunc()) + mcpInformer := sharedInformers.Machineconfiguration().V1().MachineConfigPools() + nodeInformer := informerFactory.Core().V1().Nodes() + + informerFactory.Start(ctx.Done()) + informerFactory.WaitForCacheSync(ctx.Done()) + sharedInformers.Start(ctx.Done()) + sharedInformers.WaitForCacheSync(ctx.Done()) + + for _, mcp := range tt.machineConfigPools { + err := mcpInformer.Informer().GetIndexer().Add(mcp) + require.NoError(err) + } + for _, node := range tt.nodes { + err := nodeInformer.Informer().GetIndexer().Add(node) + require.NoError(err) + } + + var localNodeName string + if len(tt.nodes) == 0 { + localNodeName = "bogus" + } else { + localNode, ok := tt.nodes[0].(*corev1.Node) + require.True(ok) + localNodeName = localNode.Name + } + + pool := tt.machineConfigPools[0].(*mcfgv1.MachineConfigPool) + validTarget, err := validateTargetPoolForNode(localNodeName, nodeInformer.Lister(), pool) + require.NoError(err) + require.Equal(tt.wantValidTarget, validTarget) + + }) + } +} + +func TestPrefetchImageSets(t *testing.T) { + require := require.New(t) + + // populate authfile + authFilePath := filepath.Join(t.TempDir(), "auth.json") + err := os.WriteFile(authFilePath, []byte(authFileContents), 0644) + require.NoError(err) + + tests := []struct { + name string + nodes []runtime.Object + machineConfigPools []runtime.Object + imageSets []runtime.Object + registryAvailableImages []string + authFilePath string + wantValidTarget bool + wantErr error + wantNotFound bool + wantPulledImages int + }{ + { + name: "happy path", + authFilePath: authFilePath, + machineConfigPools: []runtime.Object{ + fakeWorkerPoolPinnedImageSets, + }, + nodes: []runtime.Object{ + fakeStableStorageWorkerNode, + }, + wantValidTarget: true, + imageSets: []runtime.Object{ + &mcfgv1alpha1.PinnedImageSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pinned-images", + }, + Spec: mcfgv1alpha1.PinnedImageSetSpec{ + PinnedImages: []mcfgv1alpha1.PinnedImageRef{ + { + Name: availableImage, + }, + }, + }, + }, + }, + registryAvailableImages: []string{availableImage}, + wantPulledImages: 1, + }, + { + name: "timeout error while pull", + authFilePath: authFilePath, + machineConfigPools: []runtime.Object{ + fakeWorkerPoolPinnedImageSets, + }, + nodes: []runtime.Object{ + fakeStableStorageWorkerNode, + }, + wantValidTarget: true, + imageSets: []runtime.Object{ + &mcfgv1alpha1.PinnedImageSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pinned-images", + }, + Spec: mcfgv1alpha1.PinnedImageSetSpec{ + PinnedImages: []mcfgv1alpha1.PinnedImageRef{ + { + Name: slowImage, + }, + }, + }, + }, + }, + registryAvailableImages: []string{availableImage}, + wantErr: wait.ErrWaitTimeout, + }, + { + name: "image not found", + authFilePath: authFilePath, + machineConfigPools: []runtime.Object{ + fakeWorkerPoolPinnedImageSets, + }, + nodes: []runtime.Object{ + fakeStableStorageWorkerNode, + }, + wantValidTarget: true, + imageSets: []runtime.Object{ + &mcfgv1alpha1.PinnedImageSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pinned-images", + }, + Spec: mcfgv1alpha1.PinnedImageSetSpec{ + PinnedImages: []mcfgv1alpha1.PinnedImageRef{ + { + Name: availableImage, + }, + }, + }, + }, + }, + registryAvailableImages: nil, + wantErr: errFailedToPullImage, + }, + { + name: "invalid auth file", + authFilePath: "/etc/bogus", + machineConfigPools: []runtime.Object{ + 
fakeWorkerPoolPinnedImageSets, + }, + nodes: []runtime.Object{ + fakeStableStorageWorkerNode, + }, + wantValidTarget: true, + imageSets: []runtime.Object{ + &mcfgv1alpha1.PinnedImageSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pinned-images", + }, + Spec: mcfgv1alpha1.PinnedImageSetSpec{ + PinnedImages: []mcfgv1alpha1.PinnedImageRef{ + { + Name: availableImage, + }, + }, + }, + }, + }, + registryAvailableImages: nil, + wantErr: os.ErrNotExist, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + fakeMCOClient := fakemco.NewSimpleClientset(tt.machineConfigPools...) + fakeClient := fake.NewSimpleClientset(tt.nodes...) + sharedInformers := mcfginformers.NewSharedInformerFactory(fakeMCOClient, noResyncPeriodFunc()) + informerFactory := informers.NewSharedInformerFactory(fakeClient, noResyncPeriodFunc()) + mcpInformer := sharedInformers.Machineconfiguration().V1().MachineConfigPools() + imageSetInformer := sharedInformers.Machineconfiguration().V1alpha1().PinnedImageSets() + nodeInformer := informerFactory.Core().V1().Nodes() + + informerFactory.Start(ctx.Done()) + informerFactory.WaitForCacheSync(ctx.Done()) + sharedInformers.Start(ctx.Done()) + sharedInformers.WaitForCacheSync(ctx.Done()) + + for _, mcp := range tt.machineConfigPools { + err := mcpInformer.Informer().GetIndexer().Add(mcp) + require.NoError(err) + } + for _, node := range tt.nodes { + err := nodeInformer.Informer().GetIndexer().Add(node) + require.NoError(err) + } + for _, imageSet := range tt.imageSets { + err := imageSetInformer.Informer().GetIndexer().Add(imageSet) + require.NoError(err) + } + + var localNodeName string + if len(tt.nodes) == 0 { + localNodeName = "bogus" + } else { + localNode, ok := tt.nodes[0].(*corev1.Node) + require.True(ok) + localNodeName = localNode.Name + } + + // setup fake cri runtime with client + runtime := newFakeRuntime([]string{}, tt.registryAvailableImages) + listener, err := newTestListener() + require.NoError(err) + err = runtime.Start(listener) + require.NoError(err) + defer runtime.Stop() + runtimeEndpoint := listener.Addr().String() + + criClient, err := cri.NewClient(ctx, runtimeEndpoint) + require.NoError(err) + + p := &PinnedImageSetManager{ + nodeName: localNodeName, + criClient: criClient, + authFilePath: tt.authFilePath, + imageSetLister: imageSetInformer.Lister(), + imageSetSynced: imageSetInformer.Informer().HasSynced, + nodeLister: nodeInformer.Lister(), + nodeListerSynced: nodeInformer.Informer().HasSynced, + prefetchCh: make(chan prefetch, defaultPrefetchWorkers*2), + backoff: wait.Backoff{ + Steps: 1, + Duration: 10 * time.Millisecond, + Factor: retryFactor, + Cap: 10 * time.Millisecond, + }, + cache: lru.New(256), + } + + imageSets := make([]*mcfgv1alpha1.PinnedImageSet, len(tt.imageSets)) + for i, obj := range tt.imageSets { + imageSet, ok := obj.(*mcfgv1alpha1.PinnedImageSet) + require.True(ok) + imageSets[i] = imageSet + } + + // start image prefetch worker + go func() { + p.prefetchWorker(ctx) + }() + + err = p.prefetchImageSets(ctx, imageSets...) 
+			if tt.wantErr != nil {
+				require.ErrorIs(err, tt.wantErr)
+				return
+			}
+			require.NoError(err)
+			require.Equal(tt.wantPulledImages, runtime.imagesPulled())
+		})
+	}
+}
+
+var (
+	fakeStableStorageWorkerNode = fakeNode(
+		"worker",
+		"100Gi",
+	).DeepCopy()
+
+	fakeWorkerPoolPinnedImageSets = fakeMachineConfigPool(
+		"worker",
+		map[string]string{
+			"pools.operator.machineconfiguration.openshift.io/worker": "",
+		},
+		[]mcfgv1.PinnedImageSetRef{
+			{
+				Name: "fake-pinned-images",
+			},
+		},
+	)
+
+	fakeWorkerPoolNoPinnedImageSets = fakeMachineConfigPool(
+		"worker",
+		map[string]string{
+			"pools.operator.machineconfiguration.openshift.io/worker": "",
+		},
+		[]mcfgv1.PinnedImageSetRef{},
+	)
+
+	fakeMasterPoolPinnedImageSets = fakeMachineConfigPool(
+		"master",
+		map[string]string{
+			"pools.operator.machineconfiguration.openshift.io/master": "",
+			"pinnedimageset-opt-in": "true",
+		},
+		[]mcfgv1.PinnedImageSetRef{
+			{
+				Name: "fake-master-pinned-images",
+			},
+		},
+	)
+)
+
+func fakeNode(role, allocatableStorage string) *corev1.Node {
+	return &corev1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-" + role,
+			Labels: map[string]string{
+				fmt.Sprintf("node-role.kubernetes.io/%s", role): "",
+			},
+		},
+		Status: corev1.NodeStatus{
+			Capacity: corev1.ResourceList{
+				corev1.ResourceStorage:          resource.MustParse(allocatableStorage),
+				corev1.ResourceEphemeralStorage: resource.MustParse(allocatableStorage),
+			},
+		},
+	}
+}
+
+func fakeMachineConfigPool(name string, labels map[string]string, pinnedImageSets []mcfgv1.PinnedImageSetRef) *mcfgv1.MachineConfigPool {
+	return &mcfgv1.MachineConfigPool{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   name,
+			Labels: labels,
+		},
+		Spec: mcfgv1.MachineConfigPoolSpec{
+			PinnedImageSets: pinnedImageSets,
+			MachineConfigSelector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"machineconfiguration.openshift.io/role": name,
+				},
+			},
+			NodeSelector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					fmt.Sprintf("node-role.kubernetes.io/%s", name): "",
+				},
+			},
+		},
+	}
+}
+
+const (
+	availableImage   = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0000000000000000000000000000000000000000000000000000000000000000"
+	unavailableImage = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+	// slowImage is a fake image that is used to block the image puller. This simulates a slow image pull of 1 second.
+	slowImage = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
+)
+
+var _ runtimeapi.ImageServiceServer = (*FakeRuntime)(nil)
+
+// FakeRuntime represents a fake remote container runtime.
+type FakeRuntime struct {
+	server *grpc.Server
+	// Fake runtime service.
+	ImageService *apitest.FakeImageService
+
+	// images that are already stored in the fake runtime.
+	localImages []runtimeapi.Image
+	// images that are available to be pulled.
+	availableImages []runtimeapi.Image
+	// number of images pulled.
+	pulledImages int
+}
+
+// newFakeRuntime creates a new FakeRuntime.
+func newFakeRuntime(localImages, availableImages []string) *FakeRuntime {
+	f := &FakeRuntime{
+		server:       grpc.NewServer(),
+		ImageService: apitest.NewFakeImageService(),
+	}
+	for _, image := range localImages {
+		f.localImages = append(f.localImages, runtimeapi.Image{
+			Spec: &runtimeapi.ImageSpec{
+				Image: image,
+			},
+			Pinned: true,
+		})
+	}
+
+	for _, image := range availableImages {
+		f.availableImages = append(f.availableImages, runtimeapi.Image{
+			Spec: &runtimeapi.ImageSpec{
+				Image: image,
+			},
+		})
+	}
+
+	runtimeapi.RegisterImageServiceServer(f.server, f)
+	return f
+}
+
+// ImageFsInfo implements v1.ImageServiceServer.
+func (r *FakeRuntime) ImageFsInfo(ctx context.Context, req *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) {
+	return r.ImageService.ImageFsInfo(ctx)
+}
+
+// ImageStatus implements v1.ImageServiceServer.
+func (r *FakeRuntime) ImageStatus(_ context.Context, req *runtimeapi.ImageStatusRequest) (*runtimeapi.ImageStatusResponse, error) {
+	image := req.Image.Image
+
+	for _, img := range r.localImages {
+		if img.Spec.Image == image {
+			return &runtimeapi.ImageStatusResponse{
+				Image: &img,
+			}, nil
+		}
+	}
+
+	return &runtimeapi.ImageStatusResponse{}, nil
+}
+
+// ListImages implements v1.ImageServiceServer.
+func (*FakeRuntime) ListImages(context.Context, *runtimeapi.ListImagesRequest) (*runtimeapi.ListImagesResponse, error) {
+	panic("unimplemented")
+}
+
+// PullImage implements v1.ImageServiceServer.
+func (r *FakeRuntime) PullImage(ctx context.Context, req *runtimeapi.PullImageRequest) (*runtimeapi.PullImageResponse, error) {
+	if req.Image.Image == slowImage {
+		time.Sleep(1 * time.Second)
+	}
+
+	resp, err := r.ImageService.PullImage(ctx, req.Image, req.Auth, req.SandboxConfig)
+	if err != nil {
+		return nil, err
+	}
+	for _, img := range r.availableImages {
+		if img.Spec.Image == resp {
+			r.pulledImages++
+			return &runtimeapi.PullImageResponse{
+				ImageRef: resp,
+			}, nil
+		}
+	}
+
+	return nil, status.Error(codes.NotFound, "image not found")
+}
+
+// RemoveImage implements v1.ImageServiceServer.
+func (*FakeRuntime) RemoveImage(context.Context, *runtimeapi.RemoveImageRequest) (*runtimeapi.RemoveImageResponse, error) {
+	panic("unimplemented")
+}
+
+// Start starts the fake remote runtime.
+func (f *FakeRuntime) Start(listener net.Listener) error {
+	go func() {
+		_ = f.server.Serve(listener)
+	}()
+	return nil
+}
+
+func (f *FakeRuntime) imagesPulled() int {
+	return f.pulledImages
+}
+
+// newTestListener returns a TCP listener listening on the next available
+// port, bound to localhost.
+func newTestListener() (net.Listener, error) {
+	return net.Listen("tcp", "127.0.0.1:")
+}
+
+// Stop stops the fake remote runtime.
+func (f *FakeRuntime) Stop() {
+	f.server.Stop()
+}
diff --git a/pkg/upgrademonitor/upgrade_monitor.go b/pkg/upgrademonitor/upgrade_monitor.go
index 246e4c0798..9ca7f93028 100644
--- a/pkg/upgrademonitor/upgrade_monitor.go
+++ b/pkg/upgrademonitor/upgrade_monitor.go
@@ -9,6 +9,7 @@ import (
 	"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/utils/ptr"
 	v1 "github.com/openshift/api/config/v1"
 	mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1"
@@ -29,7 +30,43 @@ type Condition struct {
 // 2) the desiredConfig in the MCN Status will only be set once the update is proven to be compatible.
Meanwhile the desired and current config in the spec react to live changes of state on the Node // 3) None of this will be executed unless the TechPreviewNoUpgrade featuregate is applied. // nolint:gocyclo -func GenerateAndApplyMachineConfigNodes(parentCondition, childCondition *Condition, parentStatus, childStatus metav1.ConditionStatus, node *corev1.Node, mcfgClient mcfgclientset.Interface, fgAccessor featuregates.FeatureGateAccess) error { +func GenerateAndApplyMachineConfigNodes( + parentCondition, + childCondition *Condition, + parentStatus, + childStatus metav1.ConditionStatus, + node *corev1.Node, + mcfgClient mcfgclientset.Interface, + fgAccessor featuregates.FeatureGateAccess, +) error { + return generateAndApplyMachineConfigNodes(parentCondition, childCondition, parentStatus, childStatus, node, mcfgClient, nil, nil, fgAccessor) +} + +func UpdateMachineConfigNodeStatus( + parentCondition, + childCondition *Condition, + parentStatus, + childStatus metav1.ConditionStatus, + node *corev1.Node, + mcfgClient mcfgclientset.Interface, + imageSetApplyConfig []*machineconfigurationalphav1.MachineConfigNodeStatusPinnedImageSetApplyConfiguration, + imageSetSpec []mcfgalphav1.MachineConfigNodeSpecPinnedImageSet, + fgAccessor featuregates.FeatureGateAccess, +) error { + return generateAndApplyMachineConfigNodes(parentCondition, childCondition, parentStatus, childStatus, node, mcfgClient, imageSetApplyConfig, imageSetSpec, fgAccessor) +} + +func generateAndApplyMachineConfigNodes( + parentCondition, + childCondition *Condition, + parentStatus, + childStatus metav1.ConditionStatus, + node *corev1.Node, + mcfgClient mcfgclientset.Interface, + imageSetApplyConfig []*machineconfigurationalphav1.MachineConfigNodeStatusPinnedImageSetApplyConfiguration, + imageSetSpec []mcfgalphav1.MachineConfigNodeSpecPinnedImageSet, + fgAccessor featuregates.FeatureGateAccess, +) error { if fgAccessor == nil || node == nil || parentCondition == nil || mcfgClient == nil { return nil } @@ -92,6 +129,8 @@ func GenerateAndApplyMachineConfigNodes(parentCondition, childCondition *Conditi mcfgalphav1.MachineConfigNodeUpdateReloaded, mcfgalphav1.MachineConfigNodeUpdated, mcfgalphav1.MachineConfigNodeUpdateUncordoned, + mcfgalphav1.MachineConfigNodePinnedImageSetsDegraded, + mcfgalphav1.MachineConfigNodePinnedImageSetsProgressing, } // create all of the conditions, even the false ones if newMCNode.Status.Conditions == nil { @@ -197,7 +236,26 @@ func GenerateAndApplyMachineConfigNodes(parentCondition, childCondition *Conditi if node.Annotations["machineconfiguration.openshift.io/currentConfig"] != "" { statusconfigVersionApplyConfig = statusconfigVersionApplyConfig.WithCurrent(newMCNode.Status.ConfigVersion.Current) } - statusApplyConfig := machineconfigurationalphav1.MachineConfigNodeStatus().WithConditions(newMCNode.Status.Conditions...).WithObservedGeneration(newMCNode.Generation + 1).WithConfigVersion(statusconfigVersionApplyConfig) + statusApplyConfig := machineconfigurationalphav1.MachineConfigNodeStatus(). + WithConditions(newMCNode.Status.Conditions...). + WithObservedGeneration(newMCNode.Generation + 1). + WithConfigVersion(statusconfigVersionApplyConfig) + + if imageSetApplyConfig != nil { + statusApplyConfig = statusApplyConfig.WithPinnedImageSets(imageSetApplyConfig...) 
+ } else { + // use the existing image sets + for _, imageSet := range newMCNode.Status.PinnedImageSets { + statusApplyConfig = statusApplyConfig.WithPinnedImageSets(&machineconfigurationalphav1.MachineConfigNodeStatusPinnedImageSetApplyConfiguration{ + DesiredGeneration: ptr.To(imageSet.DesiredGeneration), + CurrentGeneration: ptr.To(imageSet.CurrentGeneration), + Name: ptr.To(imageSet.Name), + LastFailedGeneration: ptr.To(imageSet.LastFailedGeneration), + LastFailedGenerationErrors: imageSet.LastFailedGenerationErrors, + }) + } + } + mcnodeApplyConfig := machineconfigurationalphav1.MachineConfigNode(newMCNode.Name).WithStatus(statusApplyConfig) _, err := mcfgClient.MachineconfigurationV1alpha1().MachineConfigNodes().ApplyStatus(context.TODO(), mcnodeApplyConfig, metav1.ApplyOptions{FieldManager: "machine-config-operator", Force: true}) if err != nil { @@ -215,6 +273,10 @@ func GenerateAndApplyMachineConfigNodes(parentCondition, childCondition *Conditi newMCNode.Name = node.Name newMCNode.Spec.Pool = mcfgalphav1.MCOObjectReference{Name: pool} newMCNode.Spec.Node = mcfgalphav1.MCOObjectReference{Name: node.Name} + if imageSetSpec != nil { + newMCNode.Spec.PinnedImageSets = imageSetSpec + } + _, err := mcfgClient.MachineconfigurationV1alpha1().MachineConfigNodes().Create(context.TODO(), newMCNode, metav1.CreateOptions{}) if err != nil { klog.Errorf("Error creating MCN: %v", err) @@ -297,3 +359,20 @@ func createOrGetMachineConfigNode(mcfgClient mcfgclientset.Interface, node *core return mcNode, false } + +type ApplyCallback struct { + StatusConfigFn func(applyConfig *machineconfigurationalphav1.MachineConfigNodeStatusApplyConfiguration) + MachineConfigNodeFn func(*mcfgalphav1.MachineConfigNode) +} + +func applyStatusConfig(cfg *machineconfigurationalphav1.MachineConfigNodeStatusApplyConfiguration, applyCallback ...*ApplyCallback) { + for _, apply := range applyCallback { + apply.StatusConfigFn(cfg) + } +} + +func applyMachineConfigNode(mcn *mcfgalphav1.MachineConfigNode, applyCallback ...*ApplyCallback) { + for _, apply := range applyCallback { + apply.MachineConfigNodeFn(mcn) + } +} From ce9933ff6c1fe7a2c7d30f3eb89dcb8d3fab461e Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Sat, 13 Apr 2024 16:03:27 -0400 Subject: [PATCH 04/18] controller: add pinned image set controller Signed-off-by: Sam Batschelet --- cmd/machine-config-controller/start.go | 19 + .../pinnedimageset/pinned_image_set.go | 399 ++++++++++++++++++ .../pinnedimageset/pinned_image_set_test.go | 179 ++++++++ pkg/daemon/pinned_image_set.go | 15 +- pkg/upgrademonitor/upgrade_monitor.go | 2 +- 5 files changed, 604 insertions(+), 10 deletions(-) create mode 100644 pkg/controller/pinnedimageset/pinned_image_set.go create mode 100644 pkg/controller/pinnedimageset/pinned_image_set_test.go diff --git a/cmd/machine-config-controller/start.go b/cmd/machine-config-controller/start.go index 432577f80b..2a4a12a505 100644 --- a/cmd/machine-config-controller/start.go +++ b/cmd/machine-config-controller/start.go @@ -7,6 +7,7 @@ import ( "os" "time" + configv1 "github.com/openshift/api/config/v1" "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" "github.com/openshift/machine-config-operator/cmd/common" "github.com/openshift/machine-config-operator/internal/clients" @@ -16,6 +17,7 @@ import ( kubeletconfig "github.com/openshift/machine-config-operator/pkg/controller/kubelet-config" machinesetbootimage "github.com/openshift/machine-config-operator/pkg/controller/machine-set-boot-image" 
"github.com/openshift/machine-config-operator/pkg/controller/node" + "github.com/openshift/machine-config-operator/pkg/controller/pinnedimageset" "github.com/openshift/machine-config-operator/pkg/controller/render" "github.com/openshift/machine-config-operator/pkg/controller/template" "github.com/openshift/machine-config-operator/pkg/version" @@ -82,6 +84,13 @@ func runStartCmd(_ *cobra.Command, _ []string) { ctrlctx.FeatureGateAccess, ) + pinnedImageSet := pinnedimageset.New( + ctrlctx.InformerFactory.Machineconfiguration().V1alpha1().PinnedImageSets(), + ctrlctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), + ctrlctx.ClientBuilder.KubeClientOrDie("pinned-image-set-controller"), + ctrlctx.ClientBuilder.MachineConfigClientOrDie("pinned-image-set-controller"), + ) + // Start the shared factory informers that you need to use in your controller ctrlctx.InformerFactory.Start(ctrlctx.Stop) ctrlctx.KubeInformerFactory.Start(ctrlctx.Stop) @@ -113,6 +122,16 @@ func runStartCmd(_ *cobra.Command, _ []string) { } go draincontroller.Run(5, ctrlctx.Stop) + fg, err := ctrlctx.FeatureGateAccess.CurrentFeatureGates() + if err != nil { + klog.Fatalf("Could not get feature gate: %v", err) + } + if fg.Enabled(configv1.FeatureGatePinnedImages) { + go pinnedImageSet.Run(2, ctrlctx.Stop) + } else { + klog.Infof("FeatureGate %s is disabled, not starting the pinned image set controller", configv1.FeatureGatePinnedImages) + } + // wait here in this function until the context gets cancelled (which tells us whe were being shut down) <-ctx.Done() } diff --git a/pkg/controller/pinnedimageset/pinned_image_set.go b/pkg/controller/pinnedimageset/pinned_image_set.go new file mode 100644 index 0000000000..6e921db4df --- /dev/null +++ b/pkg/controller/pinnedimageset/pinned_image_set.go @@ -0,0 +1,399 @@ +package pinnedimageset + +import ( + "context" + "fmt" + "reflect" + "sort" + "time" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" + mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" + mcfginformersv1alpha1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" + mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" + mcfglistersv1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/pkg/apihelpers" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +const ( + // maxRetries is the number of times a machineconfig pool will be retried before it is dropped out of the queue. 
+	// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
+	// a machineconfig pool is going to be requeued:
+	//
+	// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
+	maxRetries = 15
+
+	// delay is a pause before enqueueing, to avoid churn when PinnedImageSets
+	// change in quick succession.
+	delay = 5 * time.Second
+)
+
+// Controller defines the pinned image set controller.
+type Controller struct {
+	client        mcfgclientset.Interface
+	eventRecorder record.EventRecorder
+
+	syncHandler              func(mcp string) error
+	enqueueMachineConfigPool func(*mcfgv1.MachineConfigPool)
+
+	mcpLister       mcfglistersv1.MachineConfigPoolLister
+	mcpListerSynced cache.InformerSynced
+
+	imageSetLister mcfglistersv1alpha1.PinnedImageSetLister
+	imageSetSynced cache.InformerSynced
+
+	queue workqueue.RateLimitingInterface
+}
+
+// New returns a new pinned image set controller.
+func New(
+	imageSetInformer mcfginformersv1alpha1.PinnedImageSetInformer,
+	mcpInformer mcfginformersv1.MachineConfigPoolInformer,
+	kubeClient clientset.Interface,
+	mcfgClient mcfgclientset.Interface,
+) *Controller {
+	eventBroadcaster := record.NewBroadcaster()
+	eventBroadcaster.StartLogging(klog.Infof)
+	eventBroadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
+
+	ctrl := &Controller{
+		client: mcfgClient,
+
+		eventRecorder: ctrlcommon.NamespacedEventRecorder(eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineconfigcontroller-pinnedimagesetcontroller"})),
+		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineconfigcontroller-pinnedimagesetcontroller"),
+	}
+
+	mcpInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc:    ctrl.addMachineConfigPool,
+		UpdateFunc: ctrl.updateMachineConfigPool,
+		DeleteFunc: ctrl.deleteMachineConfigPool,
+	})
+	imageSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		AddFunc:    ctrl.addPinnedImageSet,
+		UpdateFunc: ctrl.updatePinnedImageSet,
+		DeleteFunc: ctrl.deletePinnedImageSet,
+	})
+
+	ctrl.syncHandler = ctrl.syncMachineConfigPool
+	ctrl.enqueueMachineConfigPool = ctrl.enqueueDefault
+
+	ctrl.mcpLister = mcpInformer.Lister()
+	ctrl.mcpListerSynced = mcpInformer.Informer().HasSynced
+
+	ctrl.imageSetLister = imageSetInformer.Lister()
+	ctrl.imageSetSynced = imageSetInformer.Informer().HasSynced
+
+	return ctrl
+}
+
+// Run executes the pinned image set controller.
+func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) {
+	defer utilruntime.HandleCrash()
+	defer ctrl.queue.ShutDown()
+
+	if !cache.WaitForCacheSync(stopCh, ctrl.mcpListerSynced, ctrl.imageSetSynced) {
+		return
+	}
+
+	klog.Info("Starting MachineConfigController-PinnedImageSetController")
+	defer klog.Info("Shutting down MachineConfigController-PinnedImageSetController")
+
+	for i := 0; i < workers; i++ {
+		go wait.Until(ctrl.worker, time.Second, stopCh)
+	}
+
+	<-stopCh
+}
+
+func (ctrl *Controller) addMachineConfigPool(obj interface{}) {
+	pool := obj.(*mcfgv1.MachineConfigPool)
+	klog.V(4).Infof("Adding MachineConfigPool %s", pool.Name)
+	ctrl.enqueueMachineConfigPool(pool)
+}
+
+func (ctrl *Controller) updateMachineConfigPool(old, cur interface{}) {
+	oldPool := old.(*mcfgv1.MachineConfigPool)
+	curPool := cur.(*mcfgv1.MachineConfigPool)
+
+	klog.V(4).Infof("Updating MachineConfigPool %s", oldPool.Name)
+	ctrl.enqueueMachineConfigPool(curPool)
+}
+
+func (ctrl *Controller) deleteMachineConfigPool(obj interface{}) {
+	pool, ok := obj.(*mcfgv1.MachineConfigPool)
+	if !ok {
+		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("failed to get object from tombstone %#v", obj))
+			return
+		}
+		pool, ok = tombstone.Obj.(*mcfgv1.MachineConfigPool)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a MachineConfigPool %#v", obj))
+			return
+		}
+	}
+	klog.V(4).Infof("Deleting MachineConfigPool %s", pool.Name)
+}
+
+func (ctrl *Controller) addPinnedImageSet(obj interface{}) {
+	imageSet := obj.(*mcfgv1alpha1.PinnedImageSet)
+	if imageSet.DeletionTimestamp != nil {
+		ctrl.deletePinnedImageSet(imageSet)
+		return
+	}
+
+	pools, err := ctrl.getPoolsForPinnedImageSet(imageSet)
+	if err != nil {
+		klog.Errorf("error finding pools for pinned image set: %v", err)
+		return
+	}
+
+	klog.V(4).Infof("PinnedImageSet %s added", imageSet.Name)
+	for _, p := range pools {
+		ctrl.enqueueMachineConfigPool(p)
+	}
+}
+
+func (ctrl *Controller) updatePinnedImageSet(old, cur interface{}) {
+	oldImageSet := old.(*mcfgv1alpha1.PinnedImageSet)
+	newImageSet := cur.(*mcfgv1alpha1.PinnedImageSet)
+
+	pools, err := ctrl.getPoolsForPinnedImageSet(newImageSet)
+	if err != nil {
+		klog.Errorf("error finding pools for pinned image set: %v", err)
+		return
+	}
+
+	klog.V(4).Infof("PinnedImageSet %s updated", newImageSet.Name)
+	if triggerPinnedImageSetChange(oldImageSet, newImageSet) {
+		for _, p := range pools {
+			ctrl.enqueueMachineConfigPool(p)
+		}
+	}
+}
+
+func triggerPinnedImageSetChange(old, new *mcfgv1alpha1.PinnedImageSet) bool {
+	if old.DeletionTimestamp != new.DeletionTimestamp {
+		return true
+	}
+	if !reflect.DeepEqual(old.Spec, new.Spec) {
+		return true
+	}
+	return false
+}
+
+func (ctrl *Controller) deletePinnedImageSet(obj interface{}) {
+	imageSet, ok := obj.(*mcfgv1alpha1.PinnedImageSet)
+	if !ok {
+		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("failed to get object from tombstone %#v", obj))
+			return
+		}
+		imageSet, ok = tombstone.Obj.(*mcfgv1alpha1.PinnedImageSet)
+		if !ok {
+			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a PinnedImageSet %#v", obj))
+			return
+		}
+	}
+
+	pools, err := ctrl.getPoolsForPinnedImageSet(imageSet)
+	if err != nil {
+		klog.Errorf("error finding pools for pinned image set: %v", err)
+		return
+	}
+
+
klog.V(4).Infof("PinnedImageSet %s deleted", imageSet.Name) + for _, p := range pools { + ctrl.enqueueMachineConfigPool(p) + } +} + +func (ctrl *Controller) getPoolsForPinnedImageSet(imageSet *mcfgv1alpha1.PinnedImageSet) ([]*mcfgv1.MachineConfigPool, error) { + if len(imageSet.Labels) == 0 { + return nil, fmt.Errorf("could not find any MachineConfigPool set for PinnedImageSet %s with labels: %v", imageSet.Name, imageSet.Labels) + } + + pList, err := ctrl.mcpLister.List(labels.Everything()) + if err != nil { + return nil, err + } + + var pools []*mcfgv1.MachineConfigPool + for _, p := range pList { + selector, err := metav1.LabelSelectorAsSelector(p.Spec.MachineConfigSelector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %w", err) + } + + // If a pool with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(imageSet.Labels)) { + continue + } + + pools = append(pools, p) + } + + if len(pools) == 0 { + return nil, fmt.Errorf("could not find any MachineConfigPool set for PinnedImageSet %s with labels: %v", imageSet.Name, imageSet.Labels) + } + return pools, nil +} + +// enqueueAfter will enqueue a pool after the provided amount of time. +func (ctrl *Controller) enqueueAfter(pool *mcfgv1.MachineConfigPool, after time.Duration) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(pool) + if err != nil { + utilruntime.HandleError(fmt.Errorf("failed to get key for object %#v: %w", pool, err)) + return + } + + ctrl.queue.AddAfter(key, after) +} + +// enqueueDefault calls a default enqueue function +func (ctrl *Controller) enqueueDefault(pool *mcfgv1.MachineConfigPool) { + ctrl.enqueueAfter(pool, delay) +} + +// worker runs a worker thread that just dequeues items, processes them, and marks them done. +// It enforces that the syncHandler is never invoked concurrently with the same key. +func (ctrl *Controller) worker() { + for ctrl.processNextWorkItem() { + } +} + +func (ctrl *Controller) processNextWorkItem() bool { + key, quit := ctrl.queue.Get() + if quit { + return false + } + defer ctrl.queue.Done(key) + + err := ctrl.syncHandler(key.(string)) + ctrl.handleErr(err, key) + + return true +} + +func (ctrl *Controller) handleErr(err error, key interface{}) { + if err == nil { + ctrl.queue.Forget(key) + return + } + + if ctrl.queue.NumRequeues(key) < maxRetries { + klog.V(2).Infof("Error syncing machineconfigpool %v: %v", key, err) + ctrl.queue.AddRateLimited(key) + return + } + + utilruntime.HandleError(err) + klog.V(2).Infof("Dropping machineconfigpool %q out of the queue: %v", key, err) + ctrl.queue.Forget(key) + ctrl.queue.AddAfter(key, 1*time.Minute) +} + +// syncMachineConfigPool will sync the machineconfig pool with the given key. +// This function is not meant to be invoked concurrently with the same key. 
+func (ctrl *Controller) syncMachineConfigPool(key string) error {
+	startTime := time.Now()
+	klog.V(4).Infof("Started syncing machineconfigpool %q (%v)", key, startTime)
+	defer func() {
+		klog.V(4).Infof("Finished syncing machineconfigpool %q (%v)", key, time.Since(startTime))
+	}()
+
+	_, name, err := cache.SplitMetaNamespaceKey(key)
+	if err != nil {
+		return err
+	}
+	mcp, err := ctrl.mcpLister.Get(name)
+	if errors.IsNotFound(err) {
+		klog.V(2).Infof("MachineConfigPool %v has been deleted", key)
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	pool := mcp.DeepCopy()
+	everything := metav1.LabelSelector{}
+	if reflect.DeepEqual(pool.Spec.MachineConfigSelector, &everything) {
+		return nil
+	}
+
+	selector, err := metav1.LabelSelectorAsSelector(pool.Spec.MachineConfigSelector)
+	if err != nil {
+		return err
+	}
+
+	imageSets, err := ctrl.imageSetLister.List(selector)
+	if err != nil {
+		return err
+	}
+	sort.SliceStable(imageSets, func(i, j int) bool { return imageSets[i].Name < imageSets[j].Name })
+
+	if err := ctrl.syncPinnedImageSets(pool, imageSets); err != nil {
+		klog.Errorf("Error syncing pinned image sets: %v", err)
+		return ctrl.syncFailingStatus(pool, err)
+	}
+
+	return ctrl.syncAvailableStatus(pool)
+}
+
+func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) error {
+	// TODO: update with the correct condition MachineConfigPoolPinnedImageSetsDegraded
+	if apihelpers.IsMachineConfigPoolConditionFalse(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) {
+		return nil
+	}
+	sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionFalse, "", "")
+	apihelpers.SetMachineConfigPoolCondition(&pool.Status, *sdegraded)
+	if _, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ctrl *Controller) syncFailingStatus(pool *mcfgv1.MachineConfigPool, err error) error {
+	sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to populate pinned image sets for pool %s: %v", pool.Name, err))
+	apihelpers.SetMachineConfigPoolCondition(&pool.Status, *sdegraded)
+	if _, updateErr := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil {
+		klog.Errorf("Error updating MachineConfigPool %s: %v", pool.Name, updateErr)
+	}
+	return err
+}
+
+func (ctrl *Controller) syncPinnedImageSets(pool *mcfgv1.MachineConfigPool, imageSets []*mcfgv1alpha1.PinnedImageSet) error {
+	pinnedImageSetRefs := make([]mcfgv1.PinnedImageSetRef, 0, len(imageSets))
+	for _, imageSet := range imageSets {
+		pinnedImageSetRefs = append(pinnedImageSetRefs, mcfgv1.PinnedImageSetRef{
+			Name: imageSet.Name,
+		})
+	}
+
+	if len(pinnedImageSetRefs) == 0 {
+		pinnedImageSetRefs = nil
+	}
+
+	newPool := pool.DeepCopy()
+	newPool.Spec.PinnedImageSets = pinnedImageSetRefs
+	_, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().Update(context.TODO(), newPool, metav1.UpdateOptions{})
+	return err
+}
diff --git a/pkg/controller/pinnedimageset/pinned_image_set_test.go b/pkg/controller/pinnedimageset/pinned_image_set_test.go
new file mode 100644
index 0000000000..a6a9d117fd
--- /dev/null
+++ b/pkg/controller/pinnedimageset/pinned_image_set_test.go
@@ -0,0 +1,179 @@
+package pinnedimageset
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	k8sfake "k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/tools/record"
+
+	mcfgv1 "github.com/openshift/api/machineconfiguration/v1"
+	mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1"
+	"github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake"
+	informers "github.com/openshift/client-go/machineconfiguration/informers/externalversions"
+	ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common"
+	"github.com/openshift/machine-config-operator/test/helpers"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	alwaysReady        = func() bool { return true }
+	noResyncPeriodFunc = func() time.Duration { return 0 }
+	masterPoolSelector = metav1.AddLabelToSelector(&metav1.LabelSelector{}, "machineconfiguration.openshift.io/role", "master")
+)
+
+type fixture struct {
+	t *testing.T
+
+	client *fake.Clientset
+
+	mcpLister      []*mcfgv1.MachineConfigPool
+	imageSetLister []*mcfgv1alpha1.PinnedImageSet
+	// mcpIndexer backs the PinnedImageSet informer; it is used to mutate image sets mid-test.
+	mcpIndexer cache.Indexer
+
+	objects []runtime.Object
+}
+
+func newFixture(t *testing.T) *fixture {
+	f := &fixture{}
+	f.t = t
+	f.objects = []runtime.Object{}
+	return f
+}
+
+func (f *fixture) newController() *Controller {
+	f.client = fake.NewSimpleClientset(f.objects...)
+
+	i := informers.NewSharedInformerFactory(f.client, noResyncPeriodFunc())
+
+	pisInformer := i.Machineconfiguration().V1alpha1().PinnedImageSets()
+	f.mcpIndexer = pisInformer.Informer().GetIndexer()
+
+	c := New(
+		pisInformer,
+		i.Machineconfiguration().V1().MachineConfigPools(),
+		k8sfake.NewSimpleClientset(),
+		f.client,
+	)
+
+	c.mcpListerSynced = alwaysReady
+	c.imageSetSynced = alwaysReady
+	c.eventRecorder = ctrlcommon.NamespacedEventRecorder(&record.FakeRecorder{})
+
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	i.Start(stopCh)
+	i.WaitForCacheSync(stopCh)
+
+	for _, c := range f.mcpLister {
+		i.Machineconfiguration().V1().MachineConfigPools().Informer().GetIndexer().Add(c)
+	}
+	for _, p := range f.imageSetLister {
+		f.mcpIndexer.Add(p)
+	}
+
+	return c
+}
+
+func (f *fixture) updatePinnedImageSet(pis *mcfgv1alpha1.PinnedImageSet) error {
+	return f.mcpIndexer.Update(pis)
+}
+
+func (f *fixture) deletePinnedImageSet(pis *mcfgv1alpha1.PinnedImageSet) error {
+	return f.mcpIndexer.Delete(pis)
+}
+
+func TestUpdateSpecNewPinnedImageSet(t *testing.T) {
+	require := require.New(t)
+	f := newFixture(t)
+	mcp := helpers.NewMachineConfigPool("test-cluster-master", masterPoolSelector, helpers.MasterSelector, "")
+	pis := fakePinnedImageSet("test-pinned-image-set", "master", map[string]string{"machineconfiguration.openshift.io/role": "master"})
+
+	f.mcpLister = append(f.mcpLister, mcp)
+	f.objects = append(f.objects, mcp)
+	f.imageSetLister = append(f.imageSetLister, pis)
+	f.objects = append(f.objects, pis)
+
+	c := f.newController()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	tests := []struct {
+		name         string
+		pis          *mcfgv1alpha1.PinnedImageSet
+		deletePis    *mcfgv1alpha1.PinnedImageSet
+		wantSpecPis  []mcfgv1.PinnedImageSetRef
+		wantErr      error
+		wantImageErr error
+	}{
+		{
+			name: "happy path",
+			pis:  pis,
+			wantSpecPis: []mcfgv1.PinnedImageSetRef{
+				{
+					Name: pis.Name,
+				},
+			},
+		},
+		{
+			name:        "update labels to worker",
+			pis:         fakePinnedImageSet("test-pinned-image-set", "master", map[string]string{"machineconfiguration.openshift.io/role": "worker"}),
+			wantSpecPis: nil,
+		},
+
{ + name: "multiple labels single match", + pis: fakePinnedImageSet("test-pinned-image-set", "infr", map[string]string{ + "machineconfiguration.openshift.io/role": "master", + "test-label": "", + }), + wantSpecPis: []mcfgv1.PinnedImageSetRef{ + { + Name: pis.Name, + }, + }, + }, + { + name: "delete pinned image set", + deletePis: pis, + wantSpecPis: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.deletePis != nil { + err := f.deletePinnedImageSet(tt.deletePis) + require.NoError(err) + } else { + err := f.updatePinnedImageSet(tt.pis) + require.NoError(err) + } + err := c.syncHandler(mcp.Name) + require.NoError(err) + + mcp, err = c.client.MachineconfigurationV1().MachineConfigPools().Get(ctx, mcp.Name, metav1.GetOptions{}) + require.NoError(err) + require.Equal(mcp.Spec.PinnedImageSets, tt.wantSpecPis) + }) + } +} + +func fakePinnedImageSet(name, image string, labels map[string]string) *mcfgv1alpha1.PinnedImageSet { + return &mcfgv1alpha1.PinnedImageSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: mcfgv1alpha1.PinnedImageSetSpec{ + PinnedImages: []mcfgv1alpha1.PinnedImageRef{ + { + Name: image, + }, + }, + }, + } +} diff --git a/pkg/daemon/pinned_image_set.go b/pkg/daemon/pinned_image_set.go index 974e214fcc..3dce4f09c3 100644 --- a/pkg/daemon/pinned_image_set.go +++ b/pkg/daemon/pinned_image_set.go @@ -187,6 +187,7 @@ type imageInfo struct { } func (p *PinnedImageSetManager) sync(key string) error { + klog.V(4).Infof("Syncing PinnedImageSet for pool: %s", key) node, err := p.nodeLister.Get(p.nodeName) if err != nil { return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) @@ -401,11 +402,7 @@ func (p *PinnedImageSetManager) checkNodeAllocatableStorage(ctx context.Context, return fmt.Errorf("%w capacity: %d, required: %d", errInsufficientStorage, capacity, p.minStorageAvailableBytes.Value()) } - if err := p.checkImagePayloadStorage(ctx, imageSet.Spec.PinnedImages, capacity); err != nil { - return err - } - - return nil + return p.checkImagePayloadStorage(ctx, imageSet.Spec.PinnedImages, capacity) } func (p *PinnedImageSetManager) checkImagePayloadStorage(ctx context.Context, images []mcfgv1alpha1.PinnedImageRef, capacity int64) error { @@ -710,7 +707,7 @@ func scheduleWork(ctx context.Context, prefetchCh chan prefetch, registryAuth *r case <-ctx.Done(): return ctx.Err() default: - image := string(imageRef.Name) + image := imageRef.Name authConfig, err := registryAuth.getAuthConfigForImage(image) if err != nil { return fmt.Errorf("failed to get auth config for image %s: %w", image, err) @@ -1099,13 +1096,13 @@ func (p *PinnedImageSetManager) handleErr(err error, key interface{}) { p.queue.AddAfter(key, 1*time.Minute) } -func getImageSize(ctx context.Context, imageName string, authFilePath string) (int64, error) { +func getImageSize(ctx context.Context, imageName, authFilePath string) (int64, error) { args := []string{ "manifest", "--log-level", "error", // suppress warn log output "inspect", "--authfile", authFilePath, - string(imageName), + imageName, } output, err := exec.CommandContext(ctx, "podman", args...).CombinedOutput() @@ -1124,7 +1121,7 @@ func getImageSize(ctx context.Context, imageName string, authFilePath string) (i var totalSize int64 for _, layer := range manifest.Layers { - totalSize += int64(layer.Size) + totalSize += layer.Size } return totalSize, nil diff --git a/pkg/upgrademonitor/upgrade_monitor.go b/pkg/upgrademonitor/upgrade_monitor.go index 9ca7f93028..8c1a2ea830 100644 --- 
a/pkg/upgrademonitor/upgrade_monitor.go +++ b/pkg/upgrademonitor/upgrade_monitor.go @@ -29,7 +29,6 @@ type Condition struct { // there are a few stipulations. 1) if the parent and child condition exactly match their currently applied statuses, no new MCN is generated // 2) the desiredConfig in the MCN Status will only be set once the update is proven to be compatible. Meanwhile the desired and current config in the spec react to live changes of state on the Node // 3) None of this will be executed unless the TechPreviewNoUpgrade featuregate is applied. -// nolint:gocyclo func GenerateAndApplyMachineConfigNodes( parentCondition, childCondition *Condition, @@ -56,6 +55,7 @@ func UpdateMachineConfigNodeStatus( return generateAndApplyMachineConfigNodes(parentCondition, childCondition, parentStatus, childStatus, node, mcfgClient, imageSetApplyConfig, imageSetSpec, fgAccessor) } +// nolint:gocyclo func generateAndApplyMachineConfigNodes( parentCondition, childCondition *Condition, From 08f6549450a4aff90d571e84a172872a21ee2486 Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Mon, 15 Apr 2024 14:41:53 -0400 Subject: [PATCH 05/18] daemon: ensure informer factory starts after observing feature gates Signed-off-by: Sam Batschelet --- cmd/machine-config-daemon/start.go | 47 ++++++++++++++++++------------ pkg/daemon/pinned_image_set.go | 2 +- 2 files changed, 29 insertions(+), 20 deletions(-) diff --git a/cmd/machine-config-daemon/start.go b/cmd/machine-config-daemon/start.go index 8bcf1ccf2f..060f68c130 100644 --- a/cmd/machine-config-daemon/start.go +++ b/cmd/machine-config-daemon/start.go @@ -195,25 +195,7 @@ func runStartCmd(_ *cobra.Command, _ []string) { klog.Fatalf("Failed to initialize: %v", err) } - criClient, err := cri.NewClient(ctx, constants.DefaultCRIOSocketPath) - if err != nil { - klog.Fatalf("Failed to initialize CRI client: %v", err) - } - prefetchTimeout := 2 * time.Minute - pinnedImageSetManager := daemon.NewPinnedImageSetManager( - startOpts.nodeName, - criClient, - ctrlctx.ClientBuilder.MachineConfigClientOrDie(componentName), - ctrlctx.InformerFactory.Machineconfiguration().V1alpha1().PinnedImageSets(), - ctrlctx.KubeInformerFactory.Core().V1().Nodes(), - ctrlctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), - resource.MustParse(constants.MinFreeStorageAfterPrefetch), - constants.DefaultCRIOSocketPath, - constants.KubeletAuthFile, - prefetchTimeout, - ctrlctx.FeatureGateAccess, - ) - + // start config informer early because feature gate depends on it ctrlctx.ConfigInformerFactory.Start(ctrlctx.Stop) ctrlctx.KubeInformerFactory.Start(stopCh) ctrlctx.KubeNamespacedInformerFactory.Start(stopCh) @@ -223,13 +205,40 @@ func runStartCmd(_ *cobra.Command, _ []string) { select { case <-ctrlctx.FeatureGateAccess.InitialFeatureGatesObserved(): + // ok to start the rest of the informers now that we have observed the initial feature gates + + featureGates, err := ctrlctx.FeatureGateAccess.CurrentFeatureGates() if err != nil { klog.Fatalf("Could not get FG: %v", err) } else { klog.Infof("FeatureGates initialized: knownFeatureGates=%v", featureGates.KnownFeatures()) if featureGates.Enabled(configv1.FeatureGatePinnedImages) { + klog.Infof("Feature enabled: %s", configv1.FeatureGatePinnedImages) + criClient, err := cri.NewClient(ctx, constants.DefaultCRIOSocketPath) + if err != nil { + klog.Fatalf("Failed to initialize CRI client: %v", err) + } + + prefetchTimeout := 2 * time.Minute + pinnedImageSetManager := daemon.NewPinnedImageSetManager( + startOpts.nodeName, + 
criClient, + ctrlctx.ClientBuilder.MachineConfigClientOrDie(componentName), + ctrlctx.InformerFactory.Machineconfiguration().V1alpha1().PinnedImageSets(), + ctrlctx.KubeInformerFactory.Core().V1().Nodes(), + ctrlctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), + resource.MustParse(constants.MinFreeStorageAfterPrefetch), + constants.DefaultCRIOSocketPath, + constants.KubeletAuthFile, + prefetchTimeout, + ctrlctx.FeatureGateAccess, + ) + go pinnedImageSetManager.Run(2, stopCh) + // start the informers for the pinned image set again after the feature gate is enabled this is allowed. + // see comments in SharedInformerFactory interface. + ctrlctx.InformerFactory.Start(stopCh) } } case <-time.After(1 * time.Minute): diff --git a/pkg/daemon/pinned_image_set.go b/pkg/daemon/pinned_image_set.go index 3dce4f09c3..20b8560818 100644 --- a/pkg/daemon/pinned_image_set.go +++ b/pkg/daemon/pinned_image_set.go @@ -830,7 +830,7 @@ func (p *PinnedImageSetManager) Run(workers int, stopCh <-chan struct{}) { p.mcpSynced, ) { klog.Errorf("failed to sync initial listers cache") - return + return } workerCount, err := p.getWorkerCount() From 413b943e86fcbdd5c9039f3961257b16ce249f4a Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Tue, 16 Apr 2024 10:42:16 -0400 Subject: [PATCH 06/18] daemon: improve caching and log reporting of status Signed-off-by: Sam Batschelet --- cmd/machine-config-daemon/start.go | 1 - pkg/daemon/pinned_image_set.go | 63 +++++++++++++++++++++++++++--- 2 files changed, 57 insertions(+), 7 deletions(-) diff --git a/cmd/machine-config-daemon/start.go b/cmd/machine-config-daemon/start.go index 060f68c130..8ade1a152e 100644 --- a/cmd/machine-config-daemon/start.go +++ b/cmd/machine-config-daemon/start.go @@ -206,7 +206,6 @@ func runStartCmd(_ *cobra.Command, _ []string) { select { case <-ctrlctx.FeatureGateAccess.InitialFeatureGatesObserved(): // ok to start the rest of the informers now that we have observed the initial feature gates - featureGates, err := ctrlctx.FeatureGateAccess.CurrentFeatureGates() if err != nil { diff --git a/pkg/daemon/pinned_image_set.go b/pkg/daemon/pinned_image_set.go index 20b8560818..4bbab44465 100644 --- a/pkg/daemon/pinned_image_set.go +++ b/pkg/daemon/pinned_image_set.go @@ -182,8 +182,9 @@ func NewPinnedImageSetManager( } type imageInfo struct { - Name string - Size int64 + Name string + Size int64 + Pulled bool } func (p *PinnedImageSetManager) sync(key string) error { @@ -292,6 +293,16 @@ func (p *PinnedImageSetManager) resetWorkload(cancelFn context.CancelFunc) { p.mu.Unlock() } +func (p *PinnedImageSetManager) cancelWorkload(reason string) { + klog.Infof("Cancelling workload: %s", reason) + p.mu.Lock() + if p.cancelFn != nil { + p.cancelFn() + p.cancelFn = nil + } + p.mu.Unlock() +} + func (p *PinnedImageSetManager) syncMachineConfigPool(ctx context.Context, pool *mcfgv1.MachineConfigPool) error { if pool.Spec.PinnedImageSets == nil { return nil @@ -369,7 +380,7 @@ func (p *PinnedImageSetManager) prefetchImageSets(ctx context.Context, imageSets // monitor prefetch operations monitor := newPrefetchMonitor() for _, imageSet := range imageSets { - if err := scheduleWork(ctx, p.prefetchCh, registryAuth, imageSet.Spec.PinnedImages, monitor); err != nil { + if err := p.scheduleWork(ctx, p.prefetchCh, registryAuth, imageSet.Spec.PinnedImages, monitor); err != nil { return err } } @@ -410,6 +421,17 @@ func (p *PinnedImageSetManager) checkImagePayloadStorage(ctx context.Context, im requiredStorage := int64(0) for _, image := range images { 
imageName := image.Name + + // check cache if image is pulled + if value, found := p.cache.Get(imageName); found { + imageInfo, ok := value.(imageInfo) + if ok { + if imageInfo.Pulled { + continue + } + } + } + exists, err := p.criClient.ImageStatus(ctx, imageName) if err != nil { return err @@ -627,10 +649,10 @@ func getImageSetApplyConfigs(imageSetLister mcfglistersv1alpha1.PinnedImageSetLi }) imageSet, err := imageSetLister.Get(imageSets.Name) if err != nil { - klog.Errorf("Error getting pinned image set: %v", err) if apierrors.IsNotFound(err) { continue } + klog.Errorf("Error getting pinned image set: %v", err) return nil, nil, err } @@ -685,6 +707,15 @@ func (p *PinnedImageSetManager) prefetchWorker(ctx context.Context) { } task.monitor.Done() + cachedImage, ok := p.cache.Get(task.image) + if ok { + imageInfo := cachedImage.(imageInfo) + imageInfo.Pulled = true + p.cache.Add(task.image, imageInfo) + } else { + p.cache.Add(task.image, imageInfo{Name: task.image, Pulled: true}) + } + // throttle prefetching to avoid overloading the file system select { case <-time.After(defaultPrefetchThrottleDuration): @@ -695,7 +726,7 @@ func (p *PinnedImageSetManager) prefetchWorker(ctx context.Context) { } // scheduleWork schedules the prefetch work for the images and collects the first error encountered. -func scheduleWork(ctx context.Context, prefetchCh chan prefetch, registryAuth *registryAuth, prefetchImages []mcfgv1alpha1.PinnedImageRef, monitor *prefetchMonitor) error { +func (p *PinnedImageSetManager) scheduleWork(ctx context.Context, prefetchCh chan prefetch, registryAuth *registryAuth, prefetchImages []mcfgv1alpha1.PinnedImageRef, monitor *prefetchMonitor) error { totalImages := len(prefetchImages) updateIncrement := totalImages / 4 if updateIncrement == 0 { @@ -708,6 +739,22 @@ func scheduleWork(ctx context.Context, prefetchCh chan prefetch, registryAuth *r return ctx.Err() default: image := imageRef.Name + + // check cache if image is pulled and dont schedule + if value, found := p.cache.Get(image); found { + imageInfo, ok := value.(imageInfo) + if ok { + if imageInfo.Pulled { + scheduledImages++ + // report status every 25% of images scheduled + if scheduledImages%updateIncrement == 0 { + klog.Infof("Completed scheduling %d%% of images", (scheduledImages*100)/totalImages) + } + continue + } + } + } + authConfig, err := registryAuth.getAuthConfigForImage(image) if err != nil { return fmt.Errorf("failed to get auth config for image %s: %w", image, err) @@ -775,6 +822,7 @@ func (p *PinnedImageSetManager) deleteCrioConfigFile() error { if err := os.Remove(crioPinnedImagesDropInFilePath); err != nil { return fmt.Errorf("failed to remove crio config file: %w", err) } + klog.Infof("removed crio config file: %s", crioPinnedImagesDropInFilePath) return nil } @@ -830,7 +878,7 @@ func (p *PinnedImageSetManager) Run(workers int, stopCh <-chan struct{}) { p.mcpSynced, ) { klog.Errorf("failed to sync initial listers cache") - return + return } workerCount, err := p.getWorkerCount() @@ -948,6 +996,9 @@ func (p *PinnedImageSetManager) updatePinnedImageSet(oldObj, newObj interface{}) return } + // cancel any currently running tasks in the worker pool + p.cancelWorkload("PinnedImageSet update") + if triggerPinnedImageSetChange(oldImageSet, newImageSet) { klog.V(4).Infof("PinnedImageSet %s update", imageSet.Name) for _, pool := range pools { From a2f6ba182f3eb2209ff1f70a855a49d04baa5a4b Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Tue, 16 Apr 2024 15:36:09 -0400 Subject: [PATCH 07/18] daemon: better 
handle clearing workload when a set is mutated Signed-off-by: Sam Batschelet --- pkg/daemon/pinned_image_set.go | 167 ++++++++++++++++++--------------- 1 file changed, 89 insertions(+), 78 deletions(-) diff --git a/pkg/daemon/pinned_image_set.go b/pkg/daemon/pinned_image_set.go index 4bbab44465..64bb116145 100644 --- a/pkg/daemon/pinned_image_set.go +++ b/pkg/daemon/pinned_image_set.go @@ -69,10 +69,11 @@ const ( ) var ( - errInsufficientStorage = errors.New("storage available is less than minimum required") - errFailedToPullImage = errors.New("failed to pull image") - errNotFound = errors.New("not found") - errNoAuthForImage = errors.New("no auth found for image") + errInsufficientStorage = errors.New("storage available is less than minimum required") + errFailedToPullImage = errors.New("failed to pull image") + errNotFound = errors.New("not found") + errNoAuthForImage = errors.New("no auth found for image") + errImageSetGenerationUpdated = errors.New("image set generation updated") ) // PinnedImageSetManager manages the prefetching of images. @@ -252,6 +253,18 @@ func (p *PinnedImageSetManager) syncMachineConfigPools(ctx context.Context, pool } } + // verify all images are pulled and available if not clear the cache and requeue + for _, image := range images { + exists, err := p.criClient.ImageStatus(ctx, image.Name) + if err != nil { + return err + } + if !exists { + p.cache.Clear() + return errFailedToPullImage + } + } + // write config and reload crio last to allow a window for kubelet to gc images in emergency if err := p.ensureCrioPinnedImagesConfigFile(images); err != nil { klog.Errorf("failed to write crio config file: %v", err) @@ -284,25 +297,6 @@ func validateTargetPoolForNode(nodeName string, nodeLister corev1lister.NodeList return true, nil } -func (p *PinnedImageSetManager) resetWorkload(cancelFn context.CancelFunc) { - p.mu.Lock() - if p.cancelFn != nil { - p.cancelFn() - } - p.cancelFn = cancelFn - p.mu.Unlock() -} - -func (p *PinnedImageSetManager) cancelWorkload(reason string) { - klog.Infof("Cancelling workload: %s", reason) - p.mu.Lock() - if p.cancelFn != nil { - p.cancelFn() - p.cancelFn = nil - } - p.mu.Unlock() -} - func (p *PinnedImageSetManager) syncMachineConfigPool(ctx context.Context, pool *mcfgv1.MachineConfigPool) error { if pool.Spec.PinnedImageSets == nil { return nil @@ -325,51 +319,6 @@ func (p *PinnedImageSetManager) syncMachineConfigPool(ctx context.Context, pool return p.prefetchImageSets(ctx, imageSets...) } -// prefetchMonitor is used to monitor the status of prefetch operations. -type prefetchMonitor struct { - once sync.Once - errFn func(error) - err error - wg sync.WaitGroup -} - -func newPrefetchMonitor() *prefetchMonitor { - m := &prefetchMonitor{} - m.errFn = func(err error) { - m.once.Do(func() { - m.err = err - }) - } - return m -} - -// Add increments the number of prefetch operations the monitor is waiting for. -func (m *prefetchMonitor) Add(i int) { - m.wg.Add(i) -} - -func (m *prefetchMonitor) finalize(err error) { - m.once.Do(func() { - m.err = err - }) -} - -// Error is called when an error occurs during prefetching. -func (m *prefetchMonitor) Error(err error) { - m.finalize(err) -} - -// Done decrements the number of prefetch operations the monitor is waiting for. -func (m *prefetchMonitor) Done() { - m.wg.Done() -} - -// WaitForDone waits for the prefetch operations to complete and returns the first error encountered. 
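(Editor's note on the relocation in this hunk: resetWorkload and cancelWorkload move but keep their semantics, a mutex-guarded context.CancelFunc ensuring at most one prefetch run is live and letting an image-set mutation abort it. A small sketch of that handoff, with a hypothetical guard type standing in for the manager:)

```go
package main

import (
	"context"
	"sync"
)

// workloadGuard is an invented stand-in for the manager's mu/cancelFn pair.
type workloadGuard struct {
	mu       sync.Mutex
	cancelFn context.CancelFunc
}

// reset cancels any in-flight workload and installs the next one's cancel
// func, so only the most recent prefetch run stays live.
func (g *workloadGuard) reset(cancel context.CancelFunc) {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.cancelFn != nil {
		g.cancelFn()
	}
	g.cancelFn = cancel
}

// cancel aborts the current workload, if any, without starting a new one.
func (g *workloadGuard) cancel() {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.cancelFn != nil {
		g.cancelFn()
		g.cancelFn = nil
	}
}

func main() {
	var g workloadGuard
	ctx, cancel := context.WithCancel(context.Background())
	g.reset(cancel) // start of a prefetch run; ctx drives its workers
	g.cancel()      // e.g. the image set was mutated mid-run
	<-ctx.Done()    // returns immediately: the run has been aborted
}
```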
-func (m *prefetchMonitor) WaitForDone() error { - m.wg.Wait() - return m.err -} - // prefetchImageSets schedules the prefetching of images for the given image sets and waits for completion. func (p *PinnedImageSetManager) prefetchImageSets(ctx context.Context, imageSets ...*mcfgv1alpha1.PinnedImageSet) error { registryAuth, err := newRegistryAuth(p.authFilePath) @@ -691,11 +640,23 @@ func (p *PinnedImageSetManager) getWorkerCount() (int, error) { return workerCount, nil } -// prefetch represents a task to prefetch an image. -type prefetch struct { - image string - auth *runtimeapi.AuthConfig - monitor *prefetchMonitor +func (p *PinnedImageSetManager) resetWorkload(cancelFn context.CancelFunc) { + p.mu.Lock() + if p.cancelFn != nil { + p.cancelFn() + } + p.cancelFn = cancelFn + p.mu.Unlock() +} + +func (p *PinnedImageSetManager) cancelWorkload(reason string) { + klog.Infof("Cancelling workload: %s", reason) + p.mu.Lock() + if p.cancelFn != nil { + p.cancelFn() + p.cancelFn = nil + } + p.mu.Unlock() } // prefetchWorker is a worker that pulls images from the container runtime. @@ -740,13 +701,13 @@ func (p *PinnedImageSetManager) scheduleWork(ctx context.Context, prefetchCh cha default: image := imageRef.Name - // check cache if image is pulled and dont schedule + // check cache if image is pulled + // this is an optimization to speedup prefetching after requeue if value, found := p.cache.Get(image); found { imageInfo, ok := value.(imageInfo) if ok { if imageInfo.Pulled { scheduledImages++ - // report status every 25% of images scheduled if scheduledImages%updateIncrement == 0 { klog.Infof("Completed scheduling %d%% of images", (scheduledImages*100)/totalImages) } @@ -996,15 +957,13 @@ func (p *PinnedImageSetManager) updatePinnedImageSet(oldObj, newObj interface{}) return } - // cancel any currently running tasks in the worker pool - p.cancelWorkload("PinnedImageSet update") - if triggerPinnedImageSetChange(oldImageSet, newImageSet) { klog.V(4).Infof("PinnedImageSet %s update", imageSet.Name) for _, pool := range pools { if !isImageSetInPool(imageSet.Name, pool) { continue } + p.cancelWorkload("PinnedImageSet update") p.enqueueMachineConfigPool(pool) } } @@ -1260,3 +1219,55 @@ func (r *registryAuth) getAuthConfigForImage(image string) (*runtimeapi.AuthConf } return auth, nil } + +// prefetch represents a task to prefetch an image. +type prefetch struct { + image string + auth *runtimeapi.AuthConfig + monitor *prefetchMonitor +} + +// prefetchMonitor is used to monitor the status of prefetch operations. +type prefetchMonitor struct { + once sync.Once + errFn func(error) + err error + wg sync.WaitGroup +} + +func newPrefetchMonitor() *prefetchMonitor { + m := &prefetchMonitor{} + m.errFn = func(err error) { + m.once.Do(func() { + m.err = err + }) + } + return m +} + +// Add increments the number of prefetch operations the monitor is waiting for. +func (m *prefetchMonitor) Add(i int) { + m.wg.Add(i) +} + +func (m *prefetchMonitor) finalize(err error) { + m.once.Do(func() { + m.err = err + }) +} + +// Error is called when an error occurs during prefetching. +func (m *prefetchMonitor) Error(err error) { + m.finalize(err) +} + +// Done decrements the number of prefetch operations the monitor is waiting for. +func (m *prefetchMonitor) Done() { + m.wg.Done() +} + +// WaitForDone waits for the prefetch operations to complete and returns the first error encountered. 
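(And the counterpart this hunk re-adds at the bottom of the file: prefetchMonitor reduced to its essentials is a WaitGroup counting outstanding pulls plus a sync.Once that records only the first failure. The worker loop below is invented for illustration:)

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

type monitor struct {
	wg   sync.WaitGroup
	once sync.Once
	err  error
}

func (m *monitor) Add(n int)       { m.wg.Add(n) }
func (m *monitor) Done()           { m.wg.Done() }
func (m *monitor) Error(err error) { m.once.Do(func() { m.err = err }) }

// WaitForDone blocks until every tracked pull has finished and returns the
// first error any of them reported.
func (m *monitor) WaitForDone() error {
	m.wg.Wait()
	return m.err
}

func main() {
	m := &monitor{}
	images := []string{"quay.io/a", "quay.io/b", "quay.io/c"}
	m.Add(len(images))
	for _, img := range images {
		go func(img string) {
			defer m.Done()
			if img == "quay.io/b" { // simulate one failed pull
				m.Error(errors.New("failed to pull " + img))
			}
		}(img)
	}
	fmt.Println(m.WaitForDone()) // prints the simulated pull error
}
```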
+func (m *prefetchMonitor) WaitForDone() error { + m.wg.Wait() + return m.err +} From 9b00591063c90a0c5130afa2c7bc2d565940f8fb Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Wed, 17 Apr 2024 11:08:49 -0400 Subject: [PATCH 08/18] deps: bump openshift/api and openshift/client-go Signed-off-by: Sam Batschelet --- go.mod | 4 +- go.sum | 8 +- vendor/github.com/openshift/api/Makefile | 1 + vendor/github.com/openshift/api/README.md | 18 +- .../openshift/api/config/v1/feature_gates.go | 14 +- .../api/config/v1/types_infrastructure.go | 28 +- ...1_infrastructures-CustomNoUpgrade.crd.yaml | 114 +++-- ...erator_01_infrastructures-Default.crd.yaml | 96 +++-- ...rastructures-TechPreviewNoUpgrade.crd.yaml | 114 +++-- vendor/github.com/openshift/api/features.md | 2 +- .../api/machineconfiguration/v1/types.go | 62 +++ ...controllerconfigs-CustomNoUpgrade.crd.yaml | 108 ++++- ...nfig_01_controllerconfigs-Default.crd.yaml | 90 +++- ...ollerconfigs-TechPreviewNoUpgrade.crd.yaml | 108 ++++- ...achineconfigpools-CustomNoUpgrade.crd.yaml | 76 ++++ ...econfigpools-TechPreviewNoUpgrade.crd.yaml | 76 ++++ .../v1/zz_generated.deepcopy.go | 21 + .../v1/zz_generated.swagger_doc_generated.go | 15 + .../openshift/api/monitoring/install.go | 4 +- .../api/monitoring/{v1alpha1 => v1}/Makefile | 2 +- .../api/monitoring/{v1alpha1 => v1}/doc.go | 2 +- .../monitoring/{v1alpha1 => v1}/register.go | 4 +- .../api/monitoring/{v1alpha1 => v1}/types.go | 133 +++--- .../{v1alpha1 => v1}/zz_generated.deepcopy.go | 6 +- ..._generated.featuregated-crd-manifests.yaml | 20 +- .../zz_generated.swagger_doc_generated.go | 38 +- .../operator/v1/types_csi_cluster_driver.go | 29 ++ ...clustercsidrivers-CustomNoUpgrade.crd.yaml | 408 ++++++++++++++++++ ...ver_01_clustercsidrivers-Default.crd.yaml} | 1 + ...ercsidrivers-TechPreviewNoUpgrade.crd.yaml | 408 ++++++++++++++++++ .../api/operator/v1/zz_generated.deepcopy.go | 15 + ..._generated.featuregated-crd-manifests.yaml | 3 +- .../v1/zz_generated.swagger_doc_generated.go | 7 +- .../v1/machineconfigpoolstatus.go | 14 + .../v1/poolsynchronizerstatus.go | 81 ++++ .../applyconfigurations/internal/internal.go | 9 + .../operator/v1/vspherecsidriverconfigspec.go | 29 +- vendor/modules.txt | 6 +- 38 files changed, 1897 insertions(+), 277 deletions(-) rename vendor/github.com/openshift/api/monitoring/{v1alpha1 => v1}/Makefile (78%) rename vendor/github.com/openshift/api/monitoring/{v1alpha1 => v1}/doc.go (88%) rename vendor/github.com/openshift/api/monitoring/{v1alpha1 => v1}/register.go (97%) rename vendor/github.com/openshift/api/monitoring/{v1alpha1 => v1}/types.go (69%) rename vendor/github.com/openshift/api/monitoring/{v1alpha1 => v1}/zz_generated.deepcopy.go (98%) rename vendor/github.com/openshift/api/monitoring/{v1alpha1 => v1}/zz_generated.featuregated-crd-manifests.yaml (75%) rename vendor/github.com/openshift/api/monitoring/{v1alpha1 => v1}/zz_generated.swagger_doc_generated.go (76%) create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-CustomNoUpgrade.crd.yaml rename vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/{0000_90_csi-driver_01_clustercsidrivers.crd.yaml => 0000_90_csi-driver_01_clustercsidrivers-Default.crd.yaml} (99%) create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-TechPreviewNoUpgrade.crd.yaml create mode 100644 
vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/poolsynchronizerstatus.go diff --git a/go.mod b/go.mod index fbda09ef66..af40e45036 100644 --- a/go.mod +++ b/go.mod @@ -27,8 +27,8 @@ require ( github.com/google/renameio v0.1.0 github.com/imdario/mergo v0.3.13 github.com/opencontainers/go-digest v1.0.0 - github.com/openshift/api v0.0.0-20240410141538-3c0461467316 - github.com/openshift/client-go v0.0.0-20240408153607-64bd6feb83ae + github.com/openshift/api v0.0.0-20240416170644-3efee4252217 + github.com/openshift/client-go v0.0.0-20240415214935-be70f772f157 github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231213185242-e4dc676febfe github.com/openshift/library-go v0.0.0-20240412173449-eb2f24c36528 github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b diff --git a/go.sum b/go.sum index afa6ab27b2..f000736d93 100644 --- a/go.sum +++ b/go.sum @@ -689,10 +689,10 @@ github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQB github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/openshift/api v0.0.0-20240410141538-3c0461467316 h1:YfatsH1e28WTbuNUl3LZl6PKCr6zBaYLR7JSxg+LJSU= -github.com/openshift/api v0.0.0-20240410141538-3c0461467316/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= -github.com/openshift/client-go v0.0.0-20240408153607-64bd6feb83ae h1:WzEMf853apLG0ZgkfmKvYXBKBqhzx7nalP306yQP1ck= -github.com/openshift/client-go v0.0.0-20240408153607-64bd6feb83ae/go.mod h1:YOfx7b9ieudQJImuo95BcVzifosCrCGBErbO/w/njBU= +github.com/openshift/api v0.0.0-20240416170644-3efee4252217 h1:86Q6ukNk2zFmBGnVeSloph4U7yuPdcfNyu268xgrAUY= +github.com/openshift/api v0.0.0-20240416170644-3efee4252217/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= +github.com/openshift/client-go v0.0.0-20240415214935-be70f772f157 h1:xbd4qHpyFnnOYDHHnMQa+100MR+K/DFiC1J3BFdL+tQ= +github.com/openshift/client-go v0.0.0-20240415214935-be70f772f157/go.mod h1:Q3mt/X5xrxnR5R6BE7duF2ToLioRQJYnTYaaDS4QZTs= github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231213185242-e4dc676febfe h1:wDQtyIbJJIoif2Ux0S+9MJWIWEGV0oG+iLm8WtqwdSw= github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231213185242-e4dc676febfe/go.mod h1:SGUtv1pKZSzSVr2YCxXFvhE+LbGfI+vcetEhNicKayw= github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0 h1:GPlAy197Jkr+D0T2FNWanamraTdzS/r9ZkT29lxvHaA= diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index 3b3c94edce..7fd5397518 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -51,6 +51,7 @@ verify-scripts: bash -x hack/verify-prerelease-lifecycle-gen.sh hack/verify-payload-crds.sh hack/verify-payload-featuregates.sh + hack/verify-promoted-features-pass-tests.sh .PHONY: verify verify: verify-scripts verify-crd-schema verify-codegen-crds diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md index b46693a108..2054ba8151 100644 --- a/vendor/github.com/openshift/api/README.md +++ b/vendor/github.com/openshift/api/README.md @@ -52,7 +52,7 @@ FeatureGateMyFeatureName = newFeatureGate("MyFeatureName"). 
mustRegister() ``` -### defining tests +### defining API validation tests Tests are logically associated with FeatureGates. When adding any FeatureGated functionality a new test file is required. The test files are located in `//tests//FeatureGate.yaml`: @@ -87,6 +87,22 @@ react to changes when the FeatureGates are enabled/disabled on various FeatureSe [`gen-minimal-test.sh`](tests/hack/gen-minimal-test.sh) can still function to stub out files if you don't want to copy/paste an existing one. +### defining FeatureGate e2e tests + +In order to move an API into the `Default` FeatureSet, it is necessary to demonstrate completeness and reliability. +E2E tests are the ONLY category of test that automatically prevents regression over time: repository presubmits do NOT provide equivalent protection. +To confirm this, there is an automated verify script that runs every time a FeatureGate is added to the `Default` FeatureSet. +The script queries our CI system (sippy/component readiness) to retrieve a list of all automated tests for a given FeatureGate +and then enforces the following rules. +1. Tests must contain either `[OCPFeatureGate:]` or the standard upstream `[FeatureGate:]`. +2. There must be at least five tests for each FeatureGate. +3. Every test must be run on every TechPreview platform we have jobs for. (Ask for an exception if your feature doesn't support a variant.) +4. Every test must run at least 14 times on every platform/variant. +5. Every test must pass at least 95% of the time on every platform/variant. + +If your FeatureGate lacks automated testing, there is an exception process that allows QE to sign off on the promotion by +commenting on the PR. + ## defining new APIs diff --git a/vendor/github.com/openshift/api/config/v1/feature_gates.go b/vendor/github.com/openshift/api/config/v1/feature_gates.go index 7a9d129cfb..2786c2b30d 100644 --- a/vendor/github.com/openshift/api/config/v1/feature_gates.go +++ b/vendor/github.com/openshift/api/config/v1/feature_gates.go @@ -554,13 +554,6 @@ var ( enableIn(TechPreviewNoUpgrade). mustRegister() - FeatureGateAlertingRules = newFeatureGate("AlertingRules"). - reportProblemsToJiraComponent("Monitoring"). - contactPerson("simon"). - productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). - mustRegister() - FeatureGateBareMetalLoadBalancer = newFeatureGate("BareMetalLoadBalancer"). reportProblemsToJiraComponent("metal"). contactPerson("EmilienM"). @@ -595,4 +588,11 @@ var ( productScope(ocpSpecific). enableIn(TechPreviewNoUpgrade). mustRegister() + + FeatureGateVSphereDriverConfiguration = newFeatureGate("VSphereDriverConfiguration"). + reportProblemsToJiraComponent("Storage / Kubernetes External Components"). + contactPerson("rbednar"). + productScope(ocpSpecific). + enableIn(TechPreviewNoUpgrade). + mustRegister() ) diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 611ba928cb..fb224c6e61 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -824,7 +824,7 @@ type BareMetalPlatformSpec struct { // Once set, the list cannot be completely removed (but its second entry can). // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 ? 
self.exists_one(x, x.contains(':')) : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set // +optional APIServerInternalIPs []IP `json:"apiServerInternalIPs"` @@ -839,7 +839,7 @@ type BareMetalPlatformSpec struct { // Once set, the list cannot be completely removed (but its second entry can). // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 ? self.exists_one(x, x.contains(':')) : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set // +optional IngressIPs []IP `json:"ingressIPs"` @@ -951,7 +951,7 @@ type OpenStackPlatformSpec struct { // Once set, the list cannot be completely removed (but its second entry can). // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 ? self.exists_one(x, x.contains(':')) : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set // +optional APIServerInternalIPs []IP `json:"apiServerInternalIPs"` @@ -966,7 +966,7 @@ type OpenStackPlatformSpec struct { // Once set, the list cannot be completely removed (but its second entry can). // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 ? self.exists_one(x, x.contains(':')) : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set // +optional IngressIPs []IP `json:"ingressIPs"` @@ -1365,7 +1365,7 @@ type VSpherePlatformSpec struct { // Once set, the list cannot be completely removed (but its second entry can). // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 ? self.exists_one(x, x.contains(':')) : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set // +optional APIServerInternalIPs []IP `json:"apiServerInternalIPs"` @@ -1380,7 +1380,7 @@ type VSpherePlatformSpec struct { // Once set, the list cannot be completely removed (but its second entry can). // // +kubebuilder:validation:MaxItems=2 - // +kubebuilder:validation:XValidation:rule="size(self) == 2 ? 
self.exists_one(x, x.contains(':')) : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" + // +kubebuilder:validation:XValidation:rule="size(self) == 2 && isIP(self[0]) && isIP(self[1]) ? ip(self[0]).family() != ip(self[1]).family() : true",message="ingressIPs must contain at most one IPv4 address and at most one IPv6 address" // +listType=set // +optional IngressIPs []IP `json:"ingressIPs"` @@ -1857,17 +1857,13 @@ type InfrastructureList struct { } // IP is an IP address (for example, "10.0.0.0" or "fd00::"). -// +kubebuilder:validation:Pattern=`(^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*)` -// + --- -// + The regex for the IPv4 and IPv6 address was taken from -// + https://blog.markhatton.co.uk/2011/03/15/regular-expressions-for-ip-addresses-cidr-ranges-and-hostnames/ -// + The resulting regex is an OR of both regexes. +// +kubebuilder:validation:XValidation:rule="isIP(self)",message="value must be a valid IP address" +// +kubebuilder:validation:MaxLength:=39 +// +kubebuilder:validation:MinLength:=1 type IP string // CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
-// +kubebuilder:validation:Pattern=`(^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$)` -// + --- -// + The regex for the IPv4 and IPv6 CIDR range was taken from -// + https://blog.markhatton.co.uk/2011/03/15/regular-expressions-for-ip-addresses-cidr-ranges-and-hostnames/ -// + The resulting regex is an OR of both regexes. +// +kubebuilder:validation:XValidation:rule="isCIDR(self)",message="value must be a valid CIDR network address" +// +kubebuilder:validation:MaxLength:=43 +// +kubebuilder:validation:MinLength:=1 type CIDR string diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml index 12963bdc6a..b934a2992f 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-CustomNoUpgrade.crd.yaml @@ -122,16 +122,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets @@ -145,16 +149,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided @@ -163,8 +171,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -421,16 +433,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets @@ -444,16 +460,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided @@ -462,8 +482,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -563,16 +587,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' failureDomains: description: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure @@ -714,16 +742,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided @@ -732,8 +764,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1230,8 +1266,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1339,8 +1379,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1355,8 +1399,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1370,8 +1418,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1786,8 +1838,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -2063,8 +2119,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml index 59b4c62388..fee1bca656 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-Default.crd.yaml @@ -122,16 +122,20 
@@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets @@ -145,16 +149,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided @@ -163,8 +171,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -421,16 +433,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets @@ -444,16 +460,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided @@ -462,8 +482,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -563,16 +587,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' failureDomains: description: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure @@ -714,16 +742,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided @@ -732,8 +764,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1230,8 +1266,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1570,8 +1610,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1847,8 +1891,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml index cac0b0cc48..3291669201 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-TechPreviewNoUpgrade.crd.yaml @@ -122,16 +122,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets @@ -145,16 +149,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided @@ -163,8 +171,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -421,16 +433,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets @@ -444,16 +460,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided @@ -462,8 +482,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -563,16 +587,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' failureDomains: description: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure @@ -714,16 +742,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) - : true' + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used to connect all the OpenShift cluster nodes. Each network is provided @@ -732,8 +764,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1230,8 +1266,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1339,8 +1379,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1355,8 +1399,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1370,8 +1418,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1786,8 +1838,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -2063,8 +2119,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index fd496313af..8f6c752462 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -1,6 +1,5 @@ | FeatureGate | Default on Hypershift | Default on SelfManagedHA | TechPreviewNoUpgrade on Hypershift | TechPreviewNoUpgrade on SelfManagedHA | | ------ | --- | --- | --- | --- | -| AlertingRules| | | Enabled | Enabled | | AutomatedEtcdBackup| | | Enabled | Enabled | | CSIDriverSharedResource| | | Enabled | Enabled | | DNSNameResolver| | | 
Enabled | Enabled |
@@ -38,6 +37,7 @@
 | SigstoreImageVerification| | | Enabled | Enabled |
 | TranslateStreamCloseWebsocketRequests| | | Enabled | Enabled |
 | UpgradeStatus| | | Enabled | Enabled |
+| VSphereDriverConfiguration| | | Enabled | Enabled |
 | ValidatingAdmissionPolicy| | | Enabled | Enabled |
 | VolumeGroupSnapshot| | | Enabled | Enabled |
 | ExternalOIDC| Enabled | | Enabled | Enabled |
diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go
index c04fbdfd4d..a67f9d59d9 100644
--- a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go
+++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go
@@ -503,8 +503,62 @@ type MachineConfigPoolStatus struct {
 	// +listType=atomic
 	// +optional
 	CertExpirys []CertExpiry `json:"certExpirys"`
+
+	// poolSynchronizersStatus is the status of the machines managed by the pool synchronizers.
+	// +openshift:enable:FeatureGate=PinnedImages
+	// +listType=map
+	// +listMapKey=poolSynchronizerType
+	// +optional
+	PoolSynchronizersStatus []PoolSynchronizerStatus `json:"poolSynchronizersStatus"`
+}
+
+// +kubebuilder:validation:XValidation:rule="self.machineCount >= self.updatedMachineCount", message="machineCount must be greater than or equal to updatedMachineCount"
+// +kubebuilder:validation:XValidation:rule="self.machineCount >= self.availableMachineCount", message="machineCount must be greater than or equal to availableMachineCount"
+// +kubebuilder:validation:XValidation:rule="self.machineCount >= self.unavailableMachineCount", message="machineCount must be greater than or equal to unavailableMachineCount"
+// +kubebuilder:validation:XValidation:rule="self.machineCount >= self.readyMachineCount", message="machineCount must be greater than or equal to readyMachineCount"
+// +kubebuilder:validation:XValidation:rule="self.availableMachineCount >= self.readyMachineCount", message="availableMachineCount must be greater than or equal to readyMachineCount"
+type PoolSynchronizerStatus struct {
+	// poolSynchronizerType describes the type of the pool synchronizer.
+	// +kubebuilder:validation:Required
+	PoolSynchronizerType PoolSynchronizerType `json:"poolSynchronizerType"`
+	// machineCount is the number of machines that are managed by the node synchronizer.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Minimum=0
+	MachineCount int64 `json:"machineCount"`
+	// updatedMachineCount is the number of machines that have been updated by the node synchronizer.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Minimum=0
+	UpdatedMachineCount int64 `json:"updatedMachineCount"`
+	// readyMachineCount is the number of machines managed by the node synchronizer that are in a ready state.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Minimum=0
+	ReadyMachineCount int64 `json:"readyMachineCount"`
+	// availableMachineCount is the number of machines managed by the node synchronizer that are available.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Minimum=0
+	AvailableMachineCount int64 `json:"availableMachineCount"`
+	// unavailableMachineCount is the number of machines managed by the node synchronizer that are unavailable.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Minimum=0
+	UnavailableMachineCount int64 `json:"unavailableMachineCount"`
+	// +kubebuilder:validation:XValidation:rule="self >= oldSelf || (self == 0 && oldSelf > 0)", message="observedGeneration must not move backwards except to zero"
+	// observedGeneration is the last generation change that has been applied.
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
 }
+// PoolSynchronizerType is an enum that describes the type of pool synchronizer. A pool synchronizer is one or more controllers that
+// manage the on-disk state of a set of machines within a pool.
+// +kubebuilder:validation:Enum:="PinnedImageSets"
+// +kubebuilder:validation:MaxLength=256
+type PoolSynchronizerType string
+
+const (
+	// PinnedImageSets represents a pool synchronizer for pinned image sets.
+	PinnedImageSets PoolSynchronizerType = "PinnedImageSets"
+)
+
 // CertExpiry contains the bundle name and the expiry date
 type CertExpiry struct {
 	// bundle is the name of the bundle in which the subject certificate resides
@@ -575,6 +629,14 @@ const (
 	// MachineConfigPoolRenderDegraded means the rendered configuration for the pool cannot be generated because of an error
 	MachineConfigPoolRenderDegraded MachineConfigPoolConditionType = "RenderDegraded"
+	// MachineConfigPoolPinnedImageSetsDegraded means the pinned image sets for the pool cannot be populated because of an error
+	// +openshift:enable:FeatureGate=PinnedImages
+	MachineConfigPoolPinnedImageSetsDegraded MachineConfigPoolConditionType = "PinnedImageSetsDegraded"
+
+	// MachineConfigPoolSynchronizerDegraded means the pool synchronizer cannot be updated because of an error
+	// +openshift:enable:FeatureGate=PinnedImages
+	MachineConfigPoolSynchronizerDegraded MachineConfigPoolConditionType = "PoolSynchronizerDegraded"
+
 	// MachineConfigPoolDegraded is the overall status of the pool based, today, on whether we fail with NodeDegraded or RenderDegraded
 	MachineConfigPoolDegraded MachineConfigPoolConditionType = "Degraded"
diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml
index 69bead6760..d55c788195 100644
--- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml
+++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml
@@ -377,15 +377,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::").
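A note on the PoolSynchronizerStatus type added in types.go above: its kubebuilder XValidation markers compile into CEL rules that the API server evaluates on every write, and the observedGeneration rule is a transition rule (it can see oldSelf), acting as a ratchet that lets the value grow, or reset to zero, but never otherwise move backwards. The sketch below restates those invariants in plain Go purely for illustration; "status" and "validate" are hypothetical names, and the real enforcement happens server-side in CEL, not in vendored Go code.

package main

import (
	"errors"
	"fmt"
)

// status carries only the fields the CEL rules above reference.
type status struct {
	machineCount, updatedMachineCount, readyMachineCount int64
	availableMachineCount, unavailableMachineCount       int64
	observedGeneration                                   int64
}

// validate restates the five cross-field XValidation rules plus the
// observedGeneration ratchet ("self >= oldSelf || (self == 0 && oldSelf > 0)").
func validate(s, old status) error {
	switch {
	case s.machineCount < s.updatedMachineCount:
		return errors.New("machineCount must be greater than or equal to updatedMachineCount")
	case s.machineCount < s.availableMachineCount:
		return errors.New("machineCount must be greater than or equal to availableMachineCount")
	case s.machineCount < s.unavailableMachineCount:
		return errors.New("machineCount must be greater than or equal to unavailableMachineCount")
	case s.machineCount < s.readyMachineCount:
		return errors.New("machineCount must be greater than or equal to readyMachineCount")
	case s.availableMachineCount < s.readyMachineCount:
		return errors.New("availableMachineCount must be greater than or equal to readyMachineCount")
	}
	// A decrease is only allowed when the new value is exactly zero.
	if s.observedGeneration < old.observedGeneration && s.observedGeneration != 0 {
		return errors.New("observedGeneration must not move backwards except to zero")
	}
	return nil
}

func main() {
	old := status{machineCount: 3, observedGeneration: 6}
	next := status{machineCount: 3, updatedMachineCount: 2, readyMachineCount: 2, availableMachineCount: 2, unavailableMachineCount: 1, observedGeneration: 7}
	fmt.Println(validate(next, old)) // <nil>: all invariants hold
	next.readyMachineCount = 5
	fmt.Println(validate(next, old)) // rejected: machineCount must be greater than or equal to readyMachineCount
}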
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -401,15 +406,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -419,8 +429,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -692,15 +706,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -716,15 +735,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -734,8 +758,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
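Every IP-valued list in these regenerated manifests gets the same two-part change: the hand-maintained regex, whose backslash escapes were evidently lost upstream ("^s*" and "2[0-4]d" where "^\s*" and "2[0-4]\d" were intended), is replaced by length bounds (39 characters fits the longest textual IPv6 address) plus the CEL library function isIP(self), and the dual-stack rule now compares address families with ip(...).family() instead of testing which entry contains a colon. As a rough illustration of what the new rules accept, not code from this patch ("checkIPList" is a hypothetical name, and net/netip's parser can differ from CEL's isIP in corner cases such as zone suffixes):

package main

import (
	"fmt"
	"net/netip"
)

// checkIPList mirrors the per-item rule isIP(self) and the list rule
// "size(self) == 2 && isIP(self[0]) && isIP(self[1])
//    ? ip(self[0]).family() != ip(self[1]).family() : true".
func checkIPList(ips []string) error {
	var addrs []netip.Addr
	for _, s := range ips {
		a, err := netip.ParseAddr(s) // rough stand-in for CEL isIP()
		if err != nil {
			return fmt.Errorf("value must be a valid IP address: %q", s)
		}
		addrs = append(addrs, a)
	}
	// At most one IPv4 and one IPv6 address: with two entries, families must differ.
	if len(addrs) == 2 && addrs[0].Is4() == addrs[1].Is4() {
		return fmt.Errorf("must contain at most one IPv4 address and at most one IPv6 address")
	}
	return nil
}

func main() {
	fmt.Println(checkIPList([]string{"10.0.0.0", "fd00::"}))   // <nil>: one address per family
	fmt.Println(checkIPList([]string{"10.0.0.1", "10.0.0.2"})) // rejected: two IPv4 addresses
}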
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -838,15 +866,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' failureDomains: description: failureDomains contains the definition @@ -1000,15 +1033,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -1018,8 +1056,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1545,8 +1587,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
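The CIDR-valued lists follow the same pattern: the regex gives way to minLength: 1, maxLength: 43, and the CEL rule isCIDR(self); 43 is the longest textual form a CIDR value can take, a 39-character IPv6 address plus the four characters of "/128", just as 39 alone bounds the plain IP fields above. A hedged Go equivalent built on net/netip's ParsePrefix ("checkCIDR" is an illustrative name; the authoritative check is the CEL rule in the schema, whose exact parsing semantics may differ):

package main

import (
	"fmt"
	"net/netip"
)

// checkCIDR approximates the schema constraints on CIDR items: the string
// length bounds plus CEL isCIDR(self), i.e. the value must parse as an
// address with a prefix length, such as "10.0.0.0/8" or "fd00::/8".
func checkCIDR(s string) error {
	if len(s) < 1 || len(s) > 43 { // minLength/maxLength from the schema
		return fmt.Errorf("length must be between 1 and 43 characters: %q", s)
	}
	if _, err := netip.ParsePrefix(s); err != nil {
		return fmt.Errorf("value must be a valid CIDR network address: %q", s)
	}
	return nil
}

func main() {
	fmt.Println(checkCIDR("10.0.0.0/8")) // <nil>
	fmt.Println(checkCIDR("fd00::/8"))   // <nil>
	fmt.Println(checkCIDR("10.0.0.0"))   // rejected: missing prefix length
}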
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1659,8 +1705,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1676,8 +1726,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1692,8 +1746,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -2142,8 +2200,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -2438,8 +2500,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml index 5c64eb7f2c..a328ea4ae4 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml +++ 
b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml @@ -377,15 +377,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -401,15 +406,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? 
self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -419,8 +429,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -692,15 +706,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -716,15 +735,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -734,8 +758,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -838,15 +866,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' failureDomains: description: failureDomains contains the definition @@ -1000,15 +1033,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -1018,8 +1056,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1545,8 +1587,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1906,8 +1952,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -2202,8 +2252,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml index 0203f30d1b..4be3b0e47f 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml @@ -377,15 +377,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -401,15 +406,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -419,8 +429,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -692,15 +706,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -716,15 +735,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -734,8 +758,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -838,15 +866,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' failureDomains: description: failureDomains contains the definition @@ -1000,15 +1033,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -1018,8 +1056,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1545,8 +1587,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1659,8 +1705,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1676,8 +1726,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1692,8 +1746,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -2142,8 +2200,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -2438,8 +2500,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml index 7e4d18cb51..c4df02a787 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml +++ 
b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml @@ -525,6 +525,82 @@ spec: by the controller. format: int64 type: integer + poolSynchronizersStatus: + description: poolSynchronizersStatus is the status of the machines + managed by the pool synchronizers. + items: + properties: + availableMachineCount: + description: availableMachineCount is the number of machines + managed by the node synchronizer which are available. + format: int64 + minimum: 0 + type: integer + machineCount: + description: machineCount is the number of machines that are + managed by the node synchronizer. + format: int64 + minimum: 0 + type: integer + observedGeneration: + description: observedGeneration is the last generation change + that has been applied. + format: int64 + minimum: 0 + type: integer + x-kubernetes-validations: + - message: observedGeneration must not move backwards except + to zero + rule: self >= oldSelf || (self == 0 && oldSelf > 0) + poolSynchronizerType: + description: poolSynchronizerType describes the type of the + pool synchronizer. + enum: + - PinnedImageSets + maxLength: 256 + type: string + readyMachineCount: + description: readyMachineCount is the number of machines managed + by the node synchronizer that are in a ready state. + format: int64 + minimum: 0 + type: integer + unavailableMachineCount: + description: unavailableMachineCount is the number of machines + managed by the node synchronizer but are unavailable. + format: int64 + minimum: 0 + type: integer + updatedMachineCount: + description: updatedMachineCount is the number of machines that + have been updated by the node synchronizer. + format: int64 + minimum: 0 + type: integer + required: + - availableMachineCount + - machineCount + - poolSynchronizerType + - readyMachineCount + - unavailableMachineCount + - updatedMachineCount + type: object + x-kubernetes-validations: + - message: machineCount must be greater than or equal to updatedMachineCount + rule: self.machineCount >= self.updatedMachineCount + - message: machineCount must be greater than or equal to availableMachineCount + rule: self.machineCount >= self.availableMachineCount + - message: machineCount must be greater than or equal to unavailableMachineCount + rule: self.machineCount >= self.unavailableMachineCount + - message: machineCount must be greater than or equal to readyMachineCount + rule: self.machineCount >= self.readyMachineCount + - message: availableMachineCount must be greater than or equal to + readyMachineCount + rule: self.availableMachineCount >= self.readyMachineCount + type: array + x-kubernetes-list-map-keys: + - poolSynchronizerType + x-kubernetes-list-type: map readyMachineCount: description: readyMachineCount represents the total number of ready machines targeted by the pool. 
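[Editor's note — illustrative aside, not part of the patch.] The infrastructure CRD hunks above swap the kilobyte-long IPv4/IPv6 regex patterns for the Kubernetes CEL IP library: `isIP(self)` and `isCIDR(self)` validate each entry, the new `maxLength` bounds match the longest textual IPv6 forms (39 = eight 4-digit hex groups plus seven colons; 43 adds "/128"), and the list-level dual-stack rule now compares address families via `ip(x).family()` instead of counting entries that contain ':'. Those checks have direct `net/netip` analogues in Go; a minimal sketch, names illustrative only:

```go
// Illustrative only: net/netip equivalents of the CEL checks isIP(self),
// isCIDR(self) and ip(a).family() != ip(b).family().
package main

import (
	"fmt"
	"net/netip"
)

// differentFamilies mirrors the dual-stack rule: both strings must parse as
// IP addresses and must not share an address family.
func differentFamilies(a, b string) bool {
	pa, errA := netip.ParseAddr(a) // isIP(a)
	pb, errB := netip.ParseAddr(b) // isIP(b)
	return errA == nil && errB == nil && pa.Is4() != pb.Is4()
}

func main() {
	_, err := netip.ParsePrefix("10.0.0.0/8") // isCIDR("10.0.0.0/8")
	fmt.Println(err == nil)                                // true
	fmt.Println(differentFamilies("10.0.0.1", "fd00::1"))  // true: one v4, one v6
	fmt.Println(differentFamilies("10.0.0.1", "10.0.0.2")) // false: same family
}
```

The CEL rules run server-side at admission, so dropping the regexes does not loosen validation, while the explicit `maxLength`/`minLength` give the server a hard size bound for each entry.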
diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml index d87cb5a06d..f04afccf18 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml @@ -525,6 +525,82 @@ spec: by the controller. format: int64 type: integer + poolSynchronizersStatus: + description: poolSynchronizersStatus is the status of the machines + managed by the pool synchronizers. + items: + properties: + availableMachineCount: + description: availableMachineCount is the number of machines + managed by the node synchronizer which are available. + format: int64 + minimum: 0 + type: integer + machineCount: + description: machineCount is the number of machines that are + managed by the node synchronizer. + format: int64 + minimum: 0 + type: integer + observedGeneration: + description: observedGeneration is the last generation change + that has been applied. + format: int64 + minimum: 0 + type: integer + x-kubernetes-validations: + - message: observedGeneration must not move backwards except + to zero + rule: self >= oldSelf || (self == 0 && oldSelf > 0) + poolSynchronizerType: + description: poolSynchronizerType describes the type of the + pool synchronizer. + enum: + - PinnedImageSets + maxLength: 256 + type: string + readyMachineCount: + description: readyMachineCount is the number of machines managed + by the node synchronizer that are in a ready state. + format: int64 + minimum: 0 + type: integer + unavailableMachineCount: + description: unavailableMachineCount is the number of machines + managed by the node synchronizer but are unavailable. + format: int64 + minimum: 0 + type: integer + updatedMachineCount: + description: updatedMachineCount is the number of machines that + have been updated by the node synchronizer. + format: int64 + minimum: 0 + type: integer + required: + - availableMachineCount + - machineCount + - poolSynchronizerType + - readyMachineCount + - unavailableMachineCount + - updatedMachineCount + type: object + x-kubernetes-validations: + - message: machineCount must be greater than or equal to updatedMachineCount + rule: self.machineCount >= self.updatedMachineCount + - message: machineCount must be greater than or equal to availableMachineCount + rule: self.machineCount >= self.availableMachineCount + - message: machineCount must be greater than or equal to unavailableMachineCount + rule: self.machineCount >= self.unavailableMachineCount + - message: machineCount must be greater than or equal to readyMachineCount + rule: self.machineCount >= self.readyMachineCount + - message: availableMachineCount must be greater than or equal to + readyMachineCount + rule: self.availableMachineCount >= self.readyMachineCount + type: array + x-kubernetes-list-map-keys: + - poolSynchronizerType + x-kubernetes-list-type: map readyMachineCount: description: readyMachineCount represents the total number of ready machines targeted by the pool. 
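[Editor's note — illustrative aside, not part of the patch.] The two machineconfigpools hunks above (the CustomNoUpgrade and TechPreviewNoUpgrade variants of the same CRD) add `poolSynchronizersStatus` as a map-list keyed on `poolSynchronizerType`, with two kinds of CEL invariants: counter ordering (machineCount bounds the updated/available/unavailable/ready counts, and availableMachineCount bounds readyMachineCount) and a transition rule on observedGeneration — `self >= oldSelf || (self == 0 && oldSelf > 0)`, i.e. monotonically increasing except for a reset to zero. Restated as plain Go checks, with hypothetical names:

```go
// Illustrative only: the CEL rules above as ordinary Go checks.
package main

import "fmt"

type poolSyncStatus struct {
	MachineCount, UpdatedMachineCount, ReadyMachineCount,
	AvailableMachineCount, UnavailableMachineCount, ObservedGeneration int64
}

func validate(s, old poolSyncStatus) error {
	bounds := map[string]int64{
		"updatedMachineCount":     s.UpdatedMachineCount,
		"availableMachineCount":   s.AvailableMachineCount,
		"unavailableMachineCount": s.UnavailableMachineCount,
		"readyMachineCount":       s.ReadyMachineCount,
	}
	for name, count := range bounds {
		if s.MachineCount < count {
			return fmt.Errorf("machineCount must be greater than or equal to %s", name)
		}
	}
	if s.AvailableMachineCount < s.ReadyMachineCount {
		return fmt.Errorf("availableMachineCount must be greater than or equal to readyMachineCount")
	}
	// observedGeneration must not move backwards, except to zero.
	if s.ObservedGeneration < old.ObservedGeneration && s.ObservedGeneration != 0 {
		return fmt.Errorf("observedGeneration must not move backwards except to zero")
	}
	return nil
}

func main() {
	err := validate(
		poolSyncStatus{MachineCount: 3, ReadyMachineCount: 4},
		poolSyncStatus{},
	)
	fmt.Println(err) // machineCount must be greater than or equal to readyMachineCount
}
```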
diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go index d89627b91d..9ad13130fe 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.deepcopy.go @@ -766,6 +766,11 @@ func (in *MachineConfigPoolStatus) DeepCopyInto(out *MachineConfigPoolStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PoolSynchronizersStatus != nil { + in, out := &in.PoolSynchronizersStatus, &out.PoolSynchronizersStatus + *out = make([]PoolSynchronizerStatus, len(*in)) + copy(*out, *in) + } return } @@ -864,3 +869,19 @@ func (in *PinnedImageSetRef) DeepCopy() *PinnedImageSetRef { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PoolSynchronizerStatus) DeepCopyInto(out *PoolSynchronizerStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PoolSynchronizerStatus. +func (in *PoolSynchronizerStatus) DeepCopy() *PoolSynchronizerStatus { + if in == nil { + return nil + } + out := new(PoolSynchronizerStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go index 8923b78b8b..29a3a2a902 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.swagger_doc_generated.go @@ -297,6 +297,7 @@ var map_MachineConfigPoolStatus = map[string]string{ "degradedMachineCount": "degradedMachineCount represents the total number of machines marked degraded (or unreconcilable). 
A node is marked degraded if applying a configuration failed..", "conditions": "conditions represents the latest available observations of current state.", "certExpirys": "certExpirys keeps track of important certificate expiration data", + "poolSynchronizersStatus": "poolSynchronizersStatus is the status of the machines managed by the pool synchronizers.", } func (MachineConfigPoolStatus) SwaggerDoc() map[string]string { @@ -344,4 +345,18 @@ func (PinnedImageSetRef) SwaggerDoc() map[string]string { return map_PinnedImageSetRef } +var map_PoolSynchronizerStatus = map[string]string{ + "poolSynchronizerType": "poolSynchronizerType describes the type of the pool synchronizer.", + "machineCount": "machineCount is the number of machines that are managed by the node synchronizer.", + "updatedMachineCount": "updatedMachineCount is the number of machines that have been updated by the node synchronizer.", + "readyMachineCount": "readyMachineCount is the number of machines managed by the node synchronizer that are in a ready state.", + "availableMachineCount": "availableMachineCount is the number of machines managed by the node synchronizer which are available.", + "unavailableMachineCount": "unavailableMachineCount is the number of machines managed by the node synchronizer but are unavailable.", + "observedGeneration": "observedGeneration is the last generation change that has been applied.", +} + +func (PoolSynchronizerStatus) SwaggerDoc() map[string]string { + return map_PoolSynchronizerStatus +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/monitoring/install.go b/vendor/github.com/openshift/api/monitoring/install.go index 4a25391108..cc34a01dcf 100644 --- a/vendor/github.com/openshift/api/monitoring/install.go +++ b/vendor/github.com/openshift/api/monitoring/install.go @@ -4,7 +4,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - monitoringv1alpha1 "github.com/openshift/api/monitoring/v1alpha1" + monitoringv1 "github.com/openshift/api/monitoring/v1" ) const ( @@ -12,7 +12,7 @@ const ( ) var ( - schemeBuilder = runtime.NewSchemeBuilder(monitoringv1alpha1.Install) + schemeBuilder = runtime.NewSchemeBuilder(monitoringv1.Install) // Install is a function which adds every version of this group to a scheme Install = schemeBuilder.AddToScheme ) diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/Makefile b/vendor/github.com/openshift/api/monitoring/v1/Makefile similarity index 78% rename from vendor/github.com/openshift/api/monitoring/v1alpha1/Makefile rename to vendor/github.com/openshift/api/monitoring/v1/Makefile index 536d21926d..0a7a62e1c8 100644 --- a/vendor/github.com/openshift/api/monitoring/v1alpha1/Makefile +++ b/vendor/github.com/openshift/api/monitoring/v1/Makefile @@ -1,3 +1,3 @@ .PHONY: test test: - make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="monitoring.openshift.io/v1alpha1" + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="monitoring.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/doc.go b/vendor/github.com/openshift/api/monitoring/v1/doc.go similarity index 88% rename from vendor/github.com/openshift/api/monitoring/v1alpha1/doc.go rename to vendor/github.com/openshift/api/monitoring/v1/doc.go index fde7f93d40..bf046d6ef9 100644 --- a/vendor/github.com/openshift/api/monitoring/v1alpha1/doc.go +++ b/vendor/github.com/openshift/api/monitoring/v1/doc.go @@ -3,4 +3,4 @@ // +k8s:openapi-gen=true // +groupName=monitoring.openshift.io -package v1alpha1 
+package v1 diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/register.go b/vendor/github.com/openshift/api/monitoring/v1/register.go similarity index 97% rename from vendor/github.com/openshift/api/monitoring/v1alpha1/register.go rename to vendor/github.com/openshift/api/monitoring/v1/register.go index b0a28f995e..342c4cca21 100644 --- a/vendor/github.com/openshift/api/monitoring/v1alpha1/register.go +++ b/vendor/github.com/openshift/api/monitoring/v1/register.go @@ -1,4 +1,4 @@ -package v1alpha1 +package v1 import ( corev1 "k8s.io/api/core/v1" @@ -9,7 +9,7 @@ import ( var ( GroupName = "monitoring.openshift.io" - GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) // Install is a function which adds this version to a scheme Install = schemeBuilder.AddToScheme diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/types.go b/vendor/github.com/openshift/api/monitoring/v1/types.go similarity index 69% rename from vendor/github.com/openshift/api/monitoring/v1alpha1/types.go rename to vendor/github.com/openshift/api/monitoring/v1/types.go index d2776037e6..111538ba78 100644 --- a/vendor/github.com/openshift/api/monitoring/v1alpha1/types.go +++ b/vendor/github.com/openshift/api/monitoring/v1/types.go @@ -1,4 +1,4 @@ -package v1alpha1 +package v1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,26 +26,25 @@ import ( // // https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md // -// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. -// +openshift:compatibility-gen:level=4 +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 // +genclient // +k8s:openapi-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +kubebuilder:object:root=true // +kubebuilder:resource:path=alertingrules,scope=Namespaced // +kubebuilder:subresource:status -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1179 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1406 // +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=monitoring,operatorOrdering=01 -// +openshift:enable:FeatureGate=AlertingRules // +kubebuilder:metadata:annotations="description=OpenShift Monitoring alerting rules" type AlertingRule struct { metav1.TypeMeta `json:",inline"` - // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec describes the desired state of this AlertingRule object. + // +kubebuilder:validation:Required Spec AlertingRuleSpec `json:"spec"` // status describes the current state of this AlertOverrides object. @@ -58,17 +57,17 @@ type AlertingRule struct { // AlertingRuleList is a list of AlertingRule objects. // -// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
-// +openshift:compatibility-gen:level=4 +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 // +k8s:openapi-gen=true type AlertingRuleList struct { metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. + // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // items is a list of AlertingRule objects. + // +kubebuilder:validation:Required Items []AlertingRule `json:"items"` } @@ -94,9 +93,17 @@ type AlertingRuleSpec struct { // +listType=map // +listMapKey=name // +kubebuilder:validation:MinItems:=1 + // +kubebuilder:validation:Required Groups []RuleGroup `json:"groups"` } +// Duration is a valid prometheus time duration. +// Supported units: y, w, d, h, m, s, ms +// Examples: `30s`, `1m`, `1h20m15s`, `15d` +// +kubebuilder:validation:Pattern:="^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$" +// +kubebuilder:validation:MaxLength=2048 +type Duration string + // RuleGroup is a list of sequentially evaluated alerting rules. // // +k8s:openapi-gen=true @@ -104,6 +111,8 @@ type RuleGroup struct { // name is the name of the group. // // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 Name string `json:"name"` // interval is how often rules in the group are evaluated. If not specified, @@ -111,25 +120,17 @@ type RuleGroup struct { // which itself defaults to 30 seconds. You can check if this value has been // modified from the default on your cluster by inspecting the platform // Prometheus configuration: - // - // $ oc -n openshift-monitoring describe prometheus k8s - // // The relevant field in that resource is: spec.evaluationInterval // - // This is represented as a Prometheus duration, e.g. 1d, 1h30m, 5m, 10s. You - // can find the upstream documentation here: - // - // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#duration - // - // +kubebuilder:validation:Pattern:="^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$" // +optional - Interval string `json:"interval,omitempty"` + Interval Duration `json:"interval,omitempty"` // rules is a list of sequentially evaluated alerting rules. Prometheus may // process rule groups in parallel, but rules within a single group are always // processed sequentially, and all rules are processed. // // +kubebuilder:validation:MinItems:=1 + // +kubebuilder:validation:Required Rules []Rule `json:"rules"` } @@ -139,37 +140,32 @@ type RuleGroup struct { // // +k8s:openapi-gen=true type Rule struct { - // alert is the name of the alert. Must be a valid label value, i.e. only - // contain ASCII letters, numbers, and underscores. + // alert is the name of the alert. Must be a valid label value, i.e. may + // contain any Unicode character. // - // +kubebuilder:validation:Pattern:="^[a-zA-Z_][a-zA-Z0-9_]*$" - // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 Alert string `json:"alert"` // expr is the PromQL expression to evaluate. Every evaluation cycle this is // evaluated at the current time, and all resultant time series become pending // or firing alerts. 
This is most often a string representing a PromQL - // expression, e.g.: - // - // mapi_current_pending_csr > mapi_max_pending_csr - // + // expression, e.g.: mapi_current_pending_csr > mapi_max_pending_csr // In rare cases this could be a simple integer, e.g. a simple "1" if the // intent is to create an alert that is always firing. This is sometimes used // to create an always-firing "Watchdog" alert in order to ensure the alerting // pipeline is functional. // - // +required + // +kubebuilder:validation:Required Expr intstr.IntOrString `json:"expr"` // for is the time period after which alerts are considered firing after first // returning results. Alerts which have not yet fired for long enough are - // considered pending. This is represented as a Prometheus duration, for - // details on the format see: - // - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#duration + // considered pending. // - // +kubebuilder:validation:Pattern:="^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$" // +optional - For string `json:"for,omitempty"` + For Duration `json:"for,omitempty"` // labels to add or overwrite for each alert. The results of the PromQL // expression for the alert will result in an existing set of labels for the @@ -177,24 +173,14 @@ type Rule struct { // the same name as a label in that set, the label here wins and overwrites // the previous value. These should typically be short identifying values // that may be useful to query against. A common example is the alert - // severity: - // - // labels: - // severity: warning + // severity, where one sets `severity: warning` under the `labels` key: // // +optional Labels map[string]string `json:"labels,omitempty"` // annotations to add to each alert. These are values that can be used to // store longer additional information that you won't query on, such as alert - // descriptions or runbook links, e.g.: - // - // annotations: - // summary: HAProxy reload failure - // description: | - // This alert fires when HAProxy fails to reload its - // configuration, which will result in the router not picking up - // recently created or modified routes. + // descriptions or runbook links. // // +optional Annotations map[string]string `json:"annotations,omitempty"` @@ -224,6 +210,9 @@ type PrometheusRuleRef struct { // the reference should we ever need to. // name of the referenced PrometheusRule. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 Name string `json:"name"` } @@ -233,24 +222,23 @@ type PrometheusRuleRef struct { // AlertRelabelConfig defines a set of relabel configs for alerts. // -// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. -// +openshift:compatibility-gen:level=4 +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 // +k8s:openapi-gen=true // +kubebuilder:object:root=true // +kubebuilder:resource:path=alertrelabelconfigs,scope=Namespaced // +kubebuilder:subresource:status -// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1179 +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/1406 // +openshift:file-pattern=cvoRunLevel=0000_50,operatorName=monitoring,operatorOrdering=02 -// +openshift:enable:FeatureGate=AlertingRules // +kubebuilder:metadata:annotations="description=OpenShift Monitoring alert relabel configurations" type AlertRelabelConfig struct { metav1.TypeMeta `json:",inline"` - // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec describes the desired state of this AlertRelabelConfig object. + // +kubebuilder:validation:Required Spec AlertRelabelConfigSpec `json:"spec"` // status describes the current state of this AlertRelabelConfig object. @@ -266,6 +254,7 @@ type AlertRelabelConfigSpec struct { // configs is a list of sequentially evaluated alert relabel configs. // // +kubebuilder:validation:MinItems:=1 + // +kubebuilder:validation:Required Configs []RelabelConfig `json:"configs"` } @@ -285,18 +274,19 @@ const ( // AlertRelabelConfigList is a list of AlertRelabelConfigs. // -// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. -// +openshift:compatibility-gen:level=4 +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 // +k8s:openapi-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type AlertRelabelConfigList struct { metav1.TypeMeta `json:",inline"` - - // metadata is the standard list's metadata. + // metadata is the standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // items is a list of AlertRelabelConfigs. + // +kubebuilder:validation:MinItems:=1 + // +kubebuilder:validation:Required Items []*AlertRelabelConfig `json:"items"` } @@ -304,6 +294,7 @@ type AlertRelabelConfigList struct { // letters, numbers, and underscores. // // +kubebuilder:validation:Pattern:="^[a-zA-Z_][a-zA-Z0-9_]*$" +// +kubebuilder:validation:MaxLength=2048 type LabelName string // RelabelConfig allows dynamic rewriting of label sets for alerts. 
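[Editor's note — illustrative aside, not part of the patch.] In the monitoring/v1 types above, the free-form `interval`/`for` strings become a shared `Duration` type whose anchored pattern now also admits a bare `0` and is capped at 2048 characters. The pattern can be sanity-checked against the examples from its doc comment (`30s`, `1m`, `1h20m15s`, `15d`) with nothing but the standard library:

```go
// Illustrative only: exercising the +kubebuilder:validation:Pattern of the
// new monitoring/v1 Duration type against its documented examples.
package main

import (
	"fmt"
	"regexp"
)

var durationRE = regexp.MustCompile(
	`^(0|(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?)$`)

func main() {
	for _, s := range []string{"30s", "1m", "1h20m15s", "15d", "0", "1.5h"} {
		fmt.Printf("%-10q %v\n", s, durationRE.MatchString(s))
	}
	// All of the documented examples match; "1.5h" does not, since the
	// pattern only allows integer counts per unit.
}
```

One caveat a reviewer might note: because every unit group is optional, the empty string also satisfies the pattern; the fields using Duration are `+optional`, so leaving the field unset remains the intended way to express "no override".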
@@ -311,11 +302,22 @@ type LabelName string // - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs // - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config // +// +kubebuilder:validation:XValidation:rule="self.action != 'HashMod' || self.modulus != 0",message="relabel action hashmod requires non-zero modulus" +// +kubebuilder:validation:XValidation:rule="(self.action != 'Replace' && self.action != 'HashMod') || has(self.targetLabel)",message="targetLabel is required when action is Replace or HashMod" +// +kubebuilder:validation:XValidation:rule="(self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.sourceLabels)",message="LabelKeep and LabelDrop actions require only 'regex', and no other fields (found sourceLabels)" +// +kubebuilder:validation:XValidation:rule="(self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.targetLabel)",message="LabelKeep and LabelDrop actions require only 'regex', and no other fields (found targetLabel)" +// +kubebuilder:validation:XValidation:rule="(self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.modulus)",message="LabelKeep and LabelDrop actions require only 'regex', and no other fields (found modulus)" +// +kubebuilder:validation:XValidation:rule="(self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.separator)",message="LabelKeep and LabelDrop actions require only 'regex', and no other fields (found separator)" +// +kubebuilder:validation:XValidation:rule="(self.action != 'LabelDrop' && self.action != 'LabelKeep') || !has(self.replacement)",message="LabelKeep and LabelDrop actions require only 'regex', and no other fields (found replacement)" +// +kubebuilder:validation:XValidation:rule="!has(self.modulus) || (has(self.modulus) && size(self.sourceLabels) > 0)",message="modulus requires sourceLabels to be present" +// +kubebuilder:validation:XValidation:rule="(self.action == 'LabelDrop' || self.action == 'LabelKeep') || has(self.sourceLabels)",message="sourceLabels is required for actions Replace, Keep, Drop, HashMod and LabelMap" +// +kubebuilder:validation:XValidation:rule="(self.action != 'Replace' && self.action != 'LabelMap') || has(self.replacement)",message="replacement is required for actions Replace and LabelMap" // +k8s:openapi-gen=true type RelabelConfig struct { // sourceLabels select values from existing labels. Their content is // concatenated using the configured separator and matched against the - // configured regular expression for the Replace, Keep, and Drop actions. + // configured regular expression for the 'Replace', 'Keep', and 'Drop' actions. + // Not allowed for actions 'LabelKeep' and 'LabelDrop'. // // +optional SourceLabels []LabelName `json:"sourceLabels,omitempty"` @@ -324,36 +326,45 @@ type RelabelConfig struct { // Prometheus will use its default value of ';'. // // +optional + // +kubebuilder:validation:MaxLength=2048 Separator string `json:"separator,omitempty"` // targetLabel to which the resulting value is written in a 'Replace' action. - // It is mandatory for 'Replace' and 'HashMod' actions. Regex capture groups + // It is required for 'Replace' and 'HashMod' actions and forbidden for + // actions 'LabelKeep' and 'LabelDrop'. Regex capture groups // are available. // // +optional + // +kubebuilder:validation:MaxLength=2048 TargetLabel string `json:"targetLabel,omitempty"` // regex against which the extracted value is matched. 
Default is: '(.*)' + // regex is required for all actions except 'HashMod' // // +optional + // +kubebuilder:default=(.*) + // +kubebuilder:validation:MaxLength=2048 Regex string `json:"regex,omitempty"` // modulus to take of the hash of the source label values. This can be // combined with the 'HashMod' action to set 'target_label' to the 'modulus' - // of a hash of the concatenated 'source_labels'. + // of a hash of the concatenated 'source_labels'. This is only valid if + // sourceLabels is not empty and action is not 'LabelKeep' or 'LabelDrop'. // // +optional Modulus uint64 `json:"modulus,omitempty"` // replacement value against which a regex replace is performed if the regular // expression matches. This is required if the action is 'Replace' or - // 'LabelMap'. Regex capture groups are available. Default is: '$1' + // 'LabelMap' and forbidden for actions 'LabelKeep' and 'LabelDrop'. + // Regex capture groups are available. Default is: '$1' // // +optional + // +kubebuilder:validation:MaxLength=2048 Replacement string `json:"replacement,omitempty"` - // action to perform based on regex matching. Must be one of: Replace, Keep, - // Drop, HashMod, LabelMap, LabelDrop, or LabelKeep. Default is: 'Replace' + // action to perform based on regex matching. Must be one of: 'Replace', 'Keep', + // 'Drop', 'HashMod', 'LabelMap', 'LabelDrop', or 'LabelKeep'. Default is: 'Replace' // // +kubebuilder:validation:Enum=Replace;Keep;Drop;HashMod;LabelMap;LabelDrop;LabelKeep // +kubebuilder:default=Replace diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go similarity index 98% rename from vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.deepcopy.go rename to vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go index 649d823a10..cb472ccf54 100644 --- a/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go @@ -3,10 +3,10 @@ // Code generated by deepcopy-gen. DO NOT EDIT. 
-package v1alpha1 +package v1 import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -103,7 +103,7 @@ func (in *AlertRelabelConfigStatus) DeepCopyInto(out *AlertRelabelConfigStatus) *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.featuregated-crd-manifests.yaml similarity index 75% rename from vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.featuregated-crd-manifests.yaml rename to vendor/github.com/openshift/api/monitoring/v1/zz_generated.featuregated-crd-manifests.yaml index 2dcb2bc84b..0efeba4190 100644 --- a/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.featuregated-crd-manifests.yaml @@ -1,12 +1,11 @@ alertrelabelconfigs.monitoring.openshift.io: Annotations: description: OpenShift Monitoring alert relabel configurations - ApprovedPRNumber: https://github.com/openshift/api/pull/1179 + ApprovedPRNumber: https://github.com/openshift/api/pull/1406 CRDName: alertrelabelconfigs.monitoring.openshift.io Capability: "" Category: "" - FeatureGates: - - AlertingRules + FeatureGates: [] FilenameOperatorName: monitoring FilenameOperatorOrdering: "02" FilenameRunLevel: "0000_50" @@ -18,19 +17,17 @@ alertrelabelconfigs.monitoring.openshift.io: PrinterColumns: [] Scope: Namespaced ShortNames: null - TopLevelFeatureGates: - - AlertingRules - Version: v1alpha1 + TopLevelFeatureGates: [] + Version: v1 alertingrules.monitoring.openshift.io: Annotations: description: OpenShift Monitoring alerting rules - ApprovedPRNumber: https://github.com/openshift/api/pull/1179 + ApprovedPRNumber: https://github.com/openshift/api/pull/1406 CRDName: alertingrules.monitoring.openshift.io Capability: "" Category: "" - FeatureGates: - - AlertingRules + FeatureGates: [] FilenameOperatorName: monitoring FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_50" @@ -42,7 +39,6 @@ alertingrules.monitoring.openshift.io: PrinterColumns: [] Scope: Namespaced ShortNames: null - TopLevelFeatureGates: - - AlertingRules - Version: v1alpha1 + TopLevelFeatureGates: [] + Version: v1 diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.swagger_doc_generated.go similarity index 76% rename from vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.swagger_doc_generated.go rename to vendor/github.com/openshift/api/monitoring/v1/zz_generated.swagger_doc_generated.go index 7592907097..adb3837720 100644 --- a/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.swagger_doc_generated.go @@ -1,4 +1,4 @@ -package v1alpha1 +package v1 // This file contains a collection of methods that can be used from go-restful to // generate Swagger API documentation for its models. 
Please read this PR for more @@ -12,7 +12,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE var map_AlertRelabelConfig = map[string]string{ - "": "AlertRelabelConfig defines a set of relabel configs for alerts.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "": "AlertRelabelConfig defines a set of relabel configs for alerts.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec describes the desired state of this AlertRelabelConfig object.", "status": "status describes the current state of this AlertRelabelConfig object.", @@ -23,8 +23,8 @@ func (AlertRelabelConfig) SwaggerDoc() map[string]string { } var map_AlertRelabelConfigList = map[string]string{ - "": "AlertRelabelConfigList is a list of AlertRelabelConfigs.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "": "AlertRelabelConfigList is a list of AlertRelabelConfigs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of AlertRelabelConfigs.", } @@ -51,7 +51,7 @@ func (AlertRelabelConfigStatus) SwaggerDoc() map[string]string { } var map_AlertingRule = map[string]string{ - "": "AlertingRule represents a set of user-defined Prometheus rule groups containing alerting rules. This resource is the supported method for cluster admins to create alerts based on metrics recorded by the platform monitoring stack in OpenShift, i.e. the Prometheus instance deployed to the openshift-monitoring namespace. You might use this to create custom alerting rules not shipped with OpenShift based on metrics from components such as the node_exporter, which provides machine-level metrics such as CPU usage, or kube-state-metrics, which provides metrics on Kubernetes usage.\n\nThe API is mostly compatible with the upstream PrometheusRule type from the prometheus-operator. The primary difference being that recording rules are not allowed here -- only alerting rules. For each AlertingRule resource created, a corresponding PrometheusRule will be created in the openshift-monitoring namespace. OpenShift requires admins to use the AlertingRule resource rather than the upstream type in order to allow better OpenShift specific defaulting and validation, while not modifying the upstream APIs directly.\n\nYou can find upstream API documentation for PrometheusRule resources here:\n\nhttps://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", + "": "AlertingRule represents a set of user-defined Prometheus rule groups containing alerting rules. This resource is the supported method for cluster admins to create alerts based on metrics recorded by the platform monitoring stack in OpenShift, i.e. the Prometheus instance deployed to the openshift-monitoring namespace. You might use this to create custom alerting rules not shipped with OpenShift based on metrics from components such as the node_exporter, which provides machine-level metrics such as CPU usage, or kube-state-metrics, which provides metrics on Kubernetes usage.\n\nThe API is mostly compatible with the upstream PrometheusRule type from the prometheus-operator. The primary difference being that recording rules are not allowed here -- only alerting rules. For each AlertingRule resource created, a corresponding PrometheusRule will be created in the openshift-monitoring namespace. OpenShift requires admins to use the AlertingRule resource rather than the upstream type in order to allow better OpenShift specific defaulting and validation, while not modifying the upstream APIs directly.\n\nYou can find upstream API documentation for PrometheusRule resources here:\n\nhttps://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec describes the desired state of this AlertingRule object.", "status": "status describes the current state of this AlertOverrides object.", @@ -62,8 +62,8 @@ func (AlertingRule) SwaggerDoc() map[string]string { } var map_AlertingRuleList = map[string]string{ - "": "AlertingRuleList is a list of AlertingRule objects.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "": "AlertingRuleList is a list of AlertingRule objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "items": "items is a list of AlertingRule objects.", } @@ -101,13 +101,13 @@ func (PrometheusRuleRef) SwaggerDoc() map[string]string { var map_RelabelConfig = map[string]string{ "": "RelabelConfig allows dynamic rewriting of label sets for alerts. See Prometheus documentation: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config", - "sourceLabels": "sourceLabels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the Replace, Keep, and Drop actions.", + "sourceLabels": "sourceLabels select values from existing labels. 
Their content is concatenated using the configured separator and matched against the configured regular expression for the 'Replace', 'Keep', and 'Drop' actions. Not allowed for actions 'LabelKeep' and 'LabelDrop'.", "separator": "separator placed between concatenated source label values. When omitted, Prometheus will use its default value of ';'.", - "targetLabel": "targetLabel to which the resulting value is written in a 'Replace' action. It is mandatory for 'Replace' and 'HashMod' actions. Regex capture groups are available.", - "regex": "regex against which the extracted value is matched. Default is: '(.*)'", - "modulus": "modulus to take of the hash of the source label values. This can be combined with the 'HashMod' action to set 'target_label' to the 'modulus' of a hash of the concatenated 'source_labels'.", - "replacement": "replacement value against which a regex replace is performed if the regular expression matches. This is required if the action is 'Replace' or 'LabelMap'. Regex capture groups are available. Default is: '$1'", - "action": "action to perform based on regex matching. Must be one of: Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, or LabelKeep. Default is: 'Replace'", + "targetLabel": "targetLabel to which the resulting value is written in a 'Replace' action. It is required for 'Replace' and 'HashMod' actions and forbidden for actions 'LabelKeep' and 'LabelDrop'. Regex capture groups are available.", + "regex": "regex against which the extracted value is matched. Default is: '(.*)' regex is required for all actions except 'HashMod'", + "modulus": "modulus to take of the hash of the source label values. This can be combined with the 'HashMod' action to set 'target_label' to the 'modulus' of a hash of the concatenated 'source_labels'. This is only valid if sourceLabels is not empty and action is not 'LabelKeep' or 'LabelDrop'.", + "replacement": "replacement value against which a regex replace is performed if the regular expression matches. This is required if the action is 'Replace' or 'LabelMap' and forbidden for actions 'LabelKeep' and 'LabelDrop'. Regex capture groups are available. Default is: '$1'", + "action": "action to perform based on regex matching. Must be one of: 'Replace', 'Keep', 'Drop', 'HashMod', 'LabelMap', 'LabelDrop', or 'LabelKeep'. Default is: 'Replace'", } func (RelabelConfig) SwaggerDoc() map[string]string { @@ -116,11 +116,11 @@ func (RelabelConfig) SwaggerDoc() map[string]string { var map_Rule = map[string]string{ "": "Rule describes an alerting rule. See Prometheus documentation: - https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules", - "alert": "alert is the name of the alert. Must be a valid label value, i.e. only contain ASCII letters, numbers, and underscores.", - "expr": "expr is the PromQL expression to evaluate. Every evaluation cycle this is evaluated at the current time, and all resultant time series become pending or firing alerts. This is most often a string representing a PromQL expression, e.g.:\n\n mapi_current_pending_csr > mapi_max_pending_csr\n\nIn rare cases this could be a simple integer, e.g. a simple \"1\" if the intent is to create an alert that is always firing. This is sometimes used to create an always-firing \"Watchdog\" alert in order to ensure the alerting pipeline is functional.", - "for": "for is the time period after which alerts are considered firing after first returning results. Alerts which have not yet fired for long enough are considered pending. 
This is represented as a Prometheus duration, for details on the format see: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#duration", - "labels": "labels to add or overwrite for each alert. The results of the PromQL expression for the alert will result in an existing set of labels for the alert, after evaluating the expression, for any label specified here with the same name as a label in that set, the label here wins and overwrites the previous value. These should typically be short identifying values that may be useful to query against. A common example is the alert severity:\n\n labels:\n severity: warning", - "annotations": "annotations to add to each alert. These are values that can be used to store longer additional information that you won't query on, such as alert descriptions or runbook links, e.g.:\n\n annotations:\n summary: HAProxy reload failure\n description: |\n This alert fires when HAProxy fails to reload its\n configuration, which will result in the router not picking up\n recently created or modified routes.", + "alert": "alert is the name of the alert. Must be a valid label value, i.e. may contain any Unicode character.", + "expr": "expr is the PromQL expression to evaluate. Every evaluation cycle this is evaluated at the current time, and all resultant time series become pending or firing alerts. This is most often a string representing a PromQL expression, e.g.: mapi_current_pending_csr > mapi_max_pending_csr In rare cases this could be a simple integer, e.g. a simple \"1\" if the intent is to create an alert that is always firing. This is sometimes used to create an always-firing \"Watchdog\" alert in order to ensure the alerting pipeline is functional.", + "for": "for is the time period after which alerts are considered firing after first returning results. Alerts which have not yet fired for long enough are considered pending.", + "labels": "labels to add or overwrite for each alert. The results of the PromQL expression for the alert will result in an existing set of labels for the alert, after evaluating the expression, for any label specified here with the same name as a label in that set, the label here wins and overwrites the previous value. These should typically be short identifying values that may be useful to query against. A common example is the alert severity, where one sets `severity: warning` under the `labels` key:", + "annotations": "annotations to add to each alert. These are values that can be used to store longer additional information that you won't query on, such as alert descriptions or runbook links.", } func (Rule) SwaggerDoc() map[string]string { @@ -130,7 +130,7 @@ func (Rule) SwaggerDoc() map[string]string { var map_RuleGroup = map[string]string{ "": "RuleGroup is a list of sequentially evaluated alerting rules.", "name": "name is the name of the group.", - "interval": "interval is how often rules in the group are evaluated. If not specified, it defaults to the global.evaluation_interval configured in Prometheus, which itself defaults to 30 seconds. You can check if this value has been modified from the default on your cluster by inspecting the platform Prometheus configuration:\n\n$ oc -n openshift-monitoring describe prometheus k8s\n\nThe relevant field in that resource is: spec.evaluationInterval\n\nThis is represented as a Prometheus duration, e.g. 1d, 1h30m, 5m, 10s. 
You can find the upstream documentation here:\n\nhttps://prometheus.io/docs/prometheus/latest/configuration/configuration/#duration", + "interval": "interval is how often rules in the group are evaluated. If not specified, it defaults to the global.evaluation_interval configured in Prometheus, which itself defaults to 30 seconds. You can check if this value has been modified from the default on your cluster by inspecting the platform Prometheus configuration: The relevant field in that resource is: spec.evaluationInterval", "rules": "rules is a list of sequentially evaluated alerting rules. Prometheus may process rule groups in parallel, but rules within a single group are always processed sequentially, and all rules are processed.", } diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go index 0156d6df32..349c8d461d 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -281,6 +281,35 @@ type VSphereCSIDriverConfigSpec struct { // will be rejected. // +optional TopologyCategories []string `json:"topologyCategories,omitempty"` + + // globalMaxSnapshotsPerBlockVolume is a global configuration parameter that applies to volumes on all kinds of + // datastores. If omitted, the platform chooses a default, which is subject to change over time, currently that default is 3. + // Snapshots can not be disabled using this parameter. + // Increasing number of snapshots above 3 can have negative impact on performance, for more details see: https://kb.vmware.com/s/article/1025279 + // Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=32 + // +openshift:enable:FeatureGate=VSphereDriverConfiguration + // +optional + GlobalMaxSnapshotsPerBlockVolume *uint32 `json:"globalMaxSnapshotsPerBlockVolume,omitempty"` + + // granularMaxSnapshotsPerBlockVolumeInVSAN is a granular configuration parameter on vSAN datastore only. It + // overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. + // Snapshots for VSAN can not be disabled using this parameter. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=32 + // +openshift:enable:FeatureGate=VSphereDriverConfiguration + // +optional + GranularMaxSnapshotsPerBlockVolumeInVSAN *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVSAN,omitempty"` + + // granularMaxSnapshotsPerBlockVolumeInVVOL is a granular configuration parameter on Virtual Volumes datastore only. + // It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. + // Snapshots for VVOL can not be disabled using this parameter. 
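
The AlertingRule documentation above covers groups, interval, alert, expr, for, labels, and annotations; a minimal sketch ties them together. The apiVersion is assumed from the monitoring/v1 package path this patch vendors, and the metric name is hypothetical:

apiVersion: monitoring.openshift.io/v1   # group assumed; version matches the vendored monitoring/v1 package
kind: AlertingRule
metadata:
  name: example-rules
  namespace: openshift-monitoring        # alongside the platform stack described above
spec:
  groups:
  - name: example
    interval: 30s                        # Prometheus duration; when omitted, falls back to global.evaluation_interval
    rules:
    - alert: ExampleMetricHigh           # must be a valid label value
      expr: example_metric_total > 100   # hypothetical PromQL expression
      for: 10m                           # pending for 10m before the alert fires
      labels:
        severity: warning                # short, queryable values; overwrite same-named labels from the expression result
      annotations:
        summary: example_metric_total has been above 100 for 10 minutes
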
+ // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=32 + // +openshift:enable:FeatureGate=VSphereDriverConfiguration + // +optional + GranularMaxSnapshotsPerBlockVolumeInVVOL *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVVOL,omitempty"` } // ClusterCSIDriverStatus is the observed status of CSI driver operator diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-CustomNoUpgrade.crd.yaml new file mode 100644 index 0000000000..2853d06675 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-CustomNoUpgrade.crd.yaml @@ -0,0 +1,408 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/701 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: clustercsidrivers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ClusterCSIDriver + listKind: ClusterCSIDriverList + plural: clustercsidrivers + singular: clustercsidriver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ClusterCSIDriver object allows management and configuration + of a CSI driver operator installed by default in OpenShift. Name of the + object must be name of the CSI driver it operates. See CSIDriverName type + for list of allowed values. \n Compatibility level 1: Stable within a major + release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + properties: + name: + enum: + - ebs.csi.aws.com + - efs.csi.aws.com + - disk.csi.azure.com + - file.csi.azure.com + - filestore.csi.storage.gke.io + - pd.csi.storage.gke.io + - cinder.csi.openstack.org + - csi.vsphere.vmware.com + - manila.csi.openstack.org + - csi.ovirt.org + - csi.kubevirt.io + - csi.sharedresource.openshift.io + - diskplugin.csi.alibabacloud.com + - vpc.block.csi.ibm.io + - powervs.csi.ibm.com + - secrets-store.csi.k8s.io + - smb.csi.k8s.io + type: string + type: object + spec: + description: spec holds user settable values for configuration + properties: + driverConfig: + description: driverConfig can be used to specify platform specific + driver configuration. When omitted, this means no opinion and the + platform is left to choose reasonable defaults. These defaults are + subject to change over time. + properties: + aws: + description: aws is used to configure the AWS CSI driver. 
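
The three snapshot fields introduced in the Go type above surface on the csi.vsphere.vmware.com ClusterCSIDriver. A hedged sketch of how they would be set, using the driver name from the metadata enum in this manifest and this CRD's group/version; the limit values are illustrative, and the fields only apply where the VSphereDriverConfiguration feature gate named in the markers above is enabled:

apiVersion: operator.openshift.io/v1
kind: ClusterCSIDriver
metadata:
  name: csi.vsphere.vmware.com                     # object name must be the CSI driver name
spec:
  managementState: Managed
  logLevel: Normal
  driverConfig:
    driverType: vSphere
    vSphere:
      globalMaxSnapshotsPerBlockVolume: 3          # 1..32; the platform default is currently 3
      granularMaxSnapshotsPerBlockVolumeInVSAN: 5  # overrides the global limit on vSAN datastores only
      granularMaxSnapshotsPerBlockVolumeInVVOL: 5  # overrides the global limit on Virtual Volumes datastores only
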
+ properties: + kmsKeyARN: + description: kmsKeyARN sets the cluster default storage class + to encrypt volumes with a user-defined KMS key, rather than + the default KMS key used by AWS. The value may be either + the ARN or Alias ARN of a KMS key. + pattern: ^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b|aws-iso-e|aws-iso-f):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$ + type: string + type: object + azure: + description: azure is used to configure the Azure CSI driver. + properties: + diskEncryptionSet: + description: diskEncryptionSet sets the cluster default storage + class to encrypt volumes with a customer-managed encryption + set, rather than the default platform-managed keys. + properties: + name: + description: name is the name of the disk encryption set + that will be set on the default storage class. The value + should consist of only alphanumberic characters, underscores + (_), hyphens, and be at most 80 characters in length. + maxLength: 80 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + resourceGroup: + description: resourceGroup defines the Azure resource + group that contains the disk encryption set. The value + should consist of only alphanumberic characters, underscores + (_), parentheses, hyphens and periods. The value should + not end in a period and be at most 90 characters in + length. + maxLength: 90 + pattern: ^[\w\.\-\(\)]*[\w\-\(\)]$ + type: string + subscriptionID: + description: 'subscriptionID defines the Azure subscription + that contains the disk encryption set. The value should + meet the following conditions: 1. It should be a 128-bit + number. 2. It should be 36 characters (32 hexadecimal + characters and 4 hyphens) long. 3. It should be displayed + in five groups separated by hyphens (-). 4. The first + group should be 8 characters long. 5. The second, third, + and fourth groups should be 4 characters long. 6. The + fifth group should be 12 characters long. An Example + SubscrionID: f2007bbf-f802-4a47-9336-cf7c6b89b378' + maxLength: 36 + pattern: ^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$ + type: string + required: + - name + - resourceGroup + - subscriptionID + type: object + type: object + driverType: + description: 'driverType indicates type of CSI driver for which + the driverConfig is being applied to. Valid values are: AWS, + Azure, GCP, IBMCloud, vSphere and omitted. Consumers should + treat unknown values as a NO-OP.' + enum: + - "" + - AWS + - Azure + - GCP + - IBMCloud + - vSphere + type: string + gcp: + description: gcp is used to configure the GCP CSI driver. + properties: + kmsKey: + description: kmsKey sets the cluster default storage class + to encrypt volumes with customer-supplied encryption keys, + rather than the default keys managed by GCP. + properties: + keyRing: + description: keyRing is the name of the KMS Key Ring which + the KMS Key belongs to. The value should correspond + to an existing KMS key ring and should consist of only + alphanumeric characters, hyphens (-) and underscores + (_), and be at most 63 characters in length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + location: + description: location is the GCP location in which the + Key Ring exists. The value must match an existing GCP + location, or "global". Defaults to global, if not set. + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + name: + description: name is the name of the customer-managed + encryption key to be used for disk encryption. 
The value + should correspond to an existing KMS key and should + consist of only alphanumeric characters, hyphens (-) + and underscores (_), and be at most 63 characters in + length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + projectID: + description: projectID is the ID of the Project in which + the KMS Key Ring exists. It must be 6 to 30 lowercase + letters, digits, or hyphens. It must start with a letter. + Trailing hyphens are prohibited. + maxLength: 30 + minLength: 6 + pattern: ^[a-z][a-z0-9-]+[a-z0-9]$ + type: string + required: + - keyRing + - name + - projectID + type: object + type: object + ibmcloud: + description: ibmcloud is used to configure the IBM Cloud CSI driver. + properties: + encryptionKeyCRN: + description: encryptionKeyCRN is the IBM Cloud CRN of the + customer-managed root key to use for disk encryption of + volumes for the default storage classes. + maxLength: 154 + minLength: 144 + pattern: ^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$ + type: string + required: + - encryptionKeyCRN + type: object + vSphere: + description: vsphere is used to configure the vsphere CSI driver. + properties: + globalMaxSnapshotsPerBlockVolume: + description: 'globalMaxSnapshotsPerBlockVolume is a global + configuration parameter that applies to volumes on all kinds + of datastores. If omitted, the platform chooses a default, + which is subject to change over time, currently that default + is 3. Snapshots can not be disabled using this parameter. + Increasing number of snapshots above 3 can have negative + impact on performance, for more details see: https://kb.vmware.com/s/article/1025279 + Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html' + format: int32 + maximum: 32 + minimum: 1 + type: integer + granularMaxSnapshotsPerBlockVolumeInVSAN: + description: granularMaxSnapshotsPerBlockVolumeInVSAN is a + granular configuration parameter on vSAN datastore only. + It overrides GlobalMaxSnapshotsPerBlockVolume if set, while + it falls back to the global constraint if unset. Snapshots + for VSAN can not be disabled using this parameter. + format: int32 + maximum: 32 + minimum: 1 + type: integer + granularMaxSnapshotsPerBlockVolumeInVVOL: + description: granularMaxSnapshotsPerBlockVolumeInVVOL is a + granular configuration parameter on Virtual Volumes datastore + only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, + while it falls back to the global constraint if unset. Snapshots + for VVOL can not be disabled using this parameter. + format: int32 + maximum: 32 + minimum: 1 + type: integer + topologyCategories: + description: topologyCategories indicates tag categories with + which vcenter resources such as hostcluster or datacenter + were tagged with. If cluster Infrastructure object has a + topology, values specified in Infrastructure object will + be used and modifications to topologyCategories will be + rejected. + items: + type: string + type: array + type: object + required: + - driverType + type: object + x-kubernetes-validations: + - message: ibmcloud must be set if driverType is 'IBMCloud', but remain + unset otherwise + rule: 'has(self.driverType) && self.driverType == ''IBMCloud'' ? + has(self.ibmcloud) : !has(self.ibmcloud)' + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. 
+ \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + storageClassState: + description: StorageClassState determines if CSI operator should create + and manage storage classes. If this field value is empty or Managed + - CSI operator will continuously reconcile storage class and create + if necessary. If this field value is Unmanaged - CSI operator will + not reconcile any previously created storage class. If this field + value is Removed - CSI operator will delete the storage class it + created previously. When omitted, this means the user has no opinion + and the platform chooses a reasonable default, which is subject + to change over time. The current default behaviour is Managed. + enum: + - "" + - Managed + - Unmanaged + - Removed + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration + that was computed by the operator. Red Hat does not support the + use of this field. Misuse of this field could lead to unexpected + behavior or conflict with other configuration options. Seek guidance + from the Red Hat support before using this field. Use of this property + blocks cluster upgrades, it must be removed before upgrading your + cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + x-kubernetes-list-type: atomic + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-Default.crd.yaml similarity index 99% rename from vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers.crd.yaml rename to vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-Default.crd.yaml index 492777737b..1aa61e8d40 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-Default.crd.yaml @@ -6,6 +6,7 @@ metadata: api.openshift.io/merged-by-featuregates: "true" include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: Default name: clustercsidrivers.operator.openshift.io spec: group: operator.openshift.io diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..1141e1840f --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,408 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/701 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: clustercsidrivers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ClusterCSIDriver + listKind: ClusterCSIDriverList + plural: clustercsidrivers + singular: clustercsidriver + scope: Cluster + 
versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ClusterCSIDriver object allows management and configuration + of a CSI driver operator installed by default in OpenShift. Name of the + object must be name of the CSI driver it operates. See CSIDriverName type + for list of allowed values. \n Compatibility level 1: Stable within a major + release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + properties: + name: + enum: + - ebs.csi.aws.com + - efs.csi.aws.com + - disk.csi.azure.com + - file.csi.azure.com + - filestore.csi.storage.gke.io + - pd.csi.storage.gke.io + - cinder.csi.openstack.org + - csi.vsphere.vmware.com + - manila.csi.openstack.org + - csi.ovirt.org + - csi.kubevirt.io + - csi.sharedresource.openshift.io + - diskplugin.csi.alibabacloud.com + - vpc.block.csi.ibm.io + - powervs.csi.ibm.com + - secrets-store.csi.k8s.io + - smb.csi.k8s.io + type: string + type: object + spec: + description: spec holds user settable values for configuration + properties: + driverConfig: + description: driverConfig can be used to specify platform specific + driver configuration. When omitted, this means no opinion and the + platform is left to choose reasonable defaults. These defaults are + subject to change over time. + properties: + aws: + description: aws is used to configure the AWS CSI driver. + properties: + kmsKeyARN: + description: kmsKeyARN sets the cluster default storage class + to encrypt volumes with a user-defined KMS key, rather than + the default KMS key used by AWS. The value may be either + the ARN or Alias ARN of a KMS key. + pattern: ^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b|aws-iso-e|aws-iso-f):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$ + type: string + type: object + azure: + description: azure is used to configure the Azure CSI driver. + properties: + diskEncryptionSet: + description: diskEncryptionSet sets the cluster default storage + class to encrypt volumes with a customer-managed encryption + set, rather than the default platform-managed keys. + properties: + name: + description: name is the name of the disk encryption set + that will be set on the default storage class. The value + should consist of only alphanumberic characters, underscores + (_), hyphens, and be at most 80 characters in length. + maxLength: 80 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + resourceGroup: + description: resourceGroup defines the Azure resource + group that contains the disk encryption set. The value + should consist of only alphanumberic characters, underscores + (_), parentheses, hyphens and periods. The value should + not end in a period and be at most 90 characters in + length. 
+ maxLength: 90 + pattern: ^[\w\.\-\(\)]*[\w\-\(\)]$ + type: string + subscriptionID: + description: 'subscriptionID defines the Azure subscription + that contains the disk encryption set. The value should + meet the following conditions: 1. It should be a 128-bit + number. 2. It should be 36 characters (32 hexadecimal + characters and 4 hyphens) long. 3. It should be displayed + in five groups separated by hyphens (-). 4. The first + group should be 8 characters long. 5. The second, third, + and fourth groups should be 4 characters long. 6. The + fifth group should be 12 characters long. An Example + SubscrionID: f2007bbf-f802-4a47-9336-cf7c6b89b378' + maxLength: 36 + pattern: ^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$ + type: string + required: + - name + - resourceGroup + - subscriptionID + type: object + type: object + driverType: + description: 'driverType indicates type of CSI driver for which + the driverConfig is being applied to. Valid values are: AWS, + Azure, GCP, IBMCloud, vSphere and omitted. Consumers should + treat unknown values as a NO-OP.' + enum: + - "" + - AWS + - Azure + - GCP + - IBMCloud + - vSphere + type: string + gcp: + description: gcp is used to configure the GCP CSI driver. + properties: + kmsKey: + description: kmsKey sets the cluster default storage class + to encrypt volumes with customer-supplied encryption keys, + rather than the default keys managed by GCP. + properties: + keyRing: + description: keyRing is the name of the KMS Key Ring which + the KMS Key belongs to. The value should correspond + to an existing KMS key ring and should consist of only + alphanumeric characters, hyphens (-) and underscores + (_), and be at most 63 characters in length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + location: + description: location is the GCP location in which the + Key Ring exists. The value must match an existing GCP + location, or "global". Defaults to global, if not set. + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + name: + description: name is the name of the customer-managed + encryption key to be used for disk encryption. The value + should correspond to an existing KMS key and should + consist of only alphanumeric characters, hyphens (-) + and underscores (_), and be at most 63 characters in + length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + projectID: + description: projectID is the ID of the Project in which + the KMS Key Ring exists. It must be 6 to 30 lowercase + letters, digits, or hyphens. It must start with a letter. + Trailing hyphens are prohibited. + maxLength: 30 + minLength: 6 + pattern: ^[a-z][a-z0-9-]+[a-z0-9]$ + type: string + required: + - keyRing + - name + - projectID + type: object + type: object + ibmcloud: + description: ibmcloud is used to configure the IBM Cloud CSI driver. + properties: + encryptionKeyCRN: + description: encryptionKeyCRN is the IBM Cloud CRN of the + customer-managed root key to use for disk encryption of + volumes for the default storage classes. + maxLength: 154 + minLength: 144 + pattern: ^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$ + type: string + required: + - encryptionKeyCRN + type: object + vSphere: + description: vsphere is used to configure the vsphere CSI driver. 
+ properties: + globalMaxSnapshotsPerBlockVolume: + description: 'globalMaxSnapshotsPerBlockVolume is a global + configuration parameter that applies to volumes on all kinds + of datastores. If omitted, the platform chooses a default, + which is subject to change over time, currently that default + is 3. Snapshots can not be disabled using this parameter. + Increasing number of snapshots above 3 can have negative + impact on performance, for more details see: https://kb.vmware.com/s/article/1025279 + Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html' + format: int32 + maximum: 32 + minimum: 1 + type: integer + granularMaxSnapshotsPerBlockVolumeInVSAN: + description: granularMaxSnapshotsPerBlockVolumeInVSAN is a + granular configuration parameter on vSAN datastore only. + It overrides GlobalMaxSnapshotsPerBlockVolume if set, while + it falls back to the global constraint if unset. Snapshots + for VSAN can not be disabled using this parameter. + format: int32 + maximum: 32 + minimum: 1 + type: integer + granularMaxSnapshotsPerBlockVolumeInVVOL: + description: granularMaxSnapshotsPerBlockVolumeInVVOL is a + granular configuration parameter on Virtual Volumes datastore + only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, + while it falls back to the global constraint if unset. Snapshots + for VVOL can not be disabled using this parameter. + format: int32 + maximum: 32 + minimum: 1 + type: integer + topologyCategories: + description: topologyCategories indicates tag categories with + which vcenter resources such as hostcluster or datacenter + were tagged with. If cluster Infrastructure object has a + topology, values specified in Infrastructure object will + be used and modifications to topologyCategories will be + rejected. + items: + type: string + type: array + type: object + required: + - driverType + type: object + x-kubernetes-validations: + - message: ibmcloud must be set if driverType is 'IBMCloud', but remain + unset otherwise + rule: 'has(self.driverType) && self.driverType == ''IBMCloud'' ? + has(self.ibmcloud) : !has(self.ibmcloud)' + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." 
+ enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + storageClassState: + description: StorageClassState determines if CSI operator should create + and manage storage classes. If this field value is empty or Managed + - CSI operator will continuously reconcile storage class and create + if necessary. If this field value is Unmanaged - CSI operator will + not reconcile any previously created storage class. If this field + value is Removed - CSI operator will delete the storage class it + created previously. When omitted, this means the user has no opinion + and the platform chooses a reasonable default, which is subject + to change over time. The current default behaviour is Managed. + enum: + - "" + - Managed + - Unmanaged + - Removed + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration + that was computed by the operator. Red Hat does not support the + use of this field. Misuse of this field could lead to unexpected + behavior or conflict with other configuration options. Seek guidance + from the Red Hat support before using this field. Use of this property + blocks cluster upgrades, it must be removed before upgrading your + cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + x-kubernetes-list-type: atomic + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index 3df3ef544c..d41982f2a2 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -5011,6 +5011,21 @@ func (in *VSphereCSIDriverConfigSpec) DeepCopyInto(out *VSphereCSIDriverConfigSp *out = make([]string, len(*in)) copy(*out, *in) } + if in.GlobalMaxSnapshotsPerBlockVolume != nil { + in, out := &in.GlobalMaxSnapshotsPerBlockVolume, &out.GlobalMaxSnapshotsPerBlockVolume + *out = new(uint32) + **out = **in + } + if in.GranularMaxSnapshotsPerBlockVolumeInVSAN != nil { + in, out := &in.GranularMaxSnapshotsPerBlockVolumeInVSAN, &out.GranularMaxSnapshotsPerBlockVolumeInVSAN + *out = new(uint32) + **out = **in + } + if in.GranularMaxSnapshotsPerBlockVolumeInVVOL != nil { + in, out := &in.GranularMaxSnapshotsPerBlockVolumeInVVOL, &out.GranularMaxSnapshotsPerBlockVolumeInVVOL + *out = new(uint32) + **out = **in + } return } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index 82fb98c86c..22992a02a0 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -68,7 +68,8 @@ clustercsidrivers.operator.openshift.io: CRDName: clustercsidrivers.operator.openshift.io Capability: "" Category: "" - FeatureGates: [] + FeatureGates: + - VSphereDriverConfiguration FilenameOperatorName: csi-driver FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_90" diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 0743b412fe..95017ec934 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -501,8 +501,11 @@ func (IBMCloudCSIDriverConfigSpec) SwaggerDoc() map[string]string { } var map_VSphereCSIDriverConfigSpec = 
map[string]string{ - "": "VSphereCSIDriverConfigSpec defines properties that can be configured for vsphere CSI driver.", - "topologyCategories": "topologyCategories indicates tag categories with which vcenter resources such as hostcluster or datacenter were tagged with. If cluster Infrastructure object has a topology, values specified in Infrastructure object will be used and modifications to topologyCategories will be rejected.", + "": "VSphereCSIDriverConfigSpec defines properties that can be configured for vsphere CSI driver.", + "topologyCategories": "topologyCategories indicates tag categories with which vcenter resources such as hostcluster or datacenter were tagged with. If cluster Infrastructure object has a topology, values specified in Infrastructure object will be used and modifications to topologyCategories will be rejected.", + "globalMaxSnapshotsPerBlockVolume": "globalMaxSnapshotsPerBlockVolume is a global configuration parameter that applies to volumes on all kinds of datastores. If omitted, the platform chooses a default, which is subject to change over time, currently that default is 3. Snapshots can not be disabled using this parameter. Increasing number of snapshots above 3 can have negative impact on performance, for more details see: https://kb.vmware.com/s/article/1025279 Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html", + "granularMaxSnapshotsPerBlockVolumeInVSAN": "granularMaxSnapshotsPerBlockVolumeInVSAN is a granular configuration parameter on vSAN datastore only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. Snapshots for VSAN can not be disabled using this parameter.", + "granularMaxSnapshotsPerBlockVolumeInVVOL": "granularMaxSnapshotsPerBlockVolumeInVVOL is a granular configuration parameter on Virtual Volumes datastore only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, while it falls back to the global constraint if unset. 
Snapshots for VVOL can not be disabled using this parameter.", } func (VSphereCSIDriverConfigSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolstatus.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolstatus.go index 95a8ebc4a6..1dfea16dcc 100644 --- a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolstatus.go +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/machineconfigpoolstatus.go @@ -14,6 +14,7 @@ type MachineConfigPoolStatusApplyConfiguration struct { DegradedMachineCount *int32 `json:"degradedMachineCount,omitempty"` Conditions []MachineConfigPoolConditionApplyConfiguration `json:"conditions,omitempty"` CertExpirys []CertExpiryApplyConfiguration `json:"certExpirys,omitempty"` + PoolSynchronizersStatus []PoolSynchronizerStatusApplyConfiguration `json:"poolSynchronizersStatus,omitempty"` } // MachineConfigPoolStatusApplyConfiguration constructs an declarative configuration of the MachineConfigPoolStatus type for use with @@ -103,3 +104,16 @@ func (b *MachineConfigPoolStatusApplyConfiguration) WithCertExpirys(values ...*C } return b } + +// WithPoolSynchronizersStatus adds the given value to the PoolSynchronizersStatus field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PoolSynchronizersStatus field. +func (b *MachineConfigPoolStatusApplyConfiguration) WithPoolSynchronizersStatus(values ...*PoolSynchronizerStatusApplyConfiguration) *MachineConfigPoolStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPoolSynchronizersStatus") + } + b.PoolSynchronizersStatus = append(b.PoolSynchronizersStatus, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/poolsynchronizerstatus.go b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/poolsynchronizerstatus.go new file mode 100644 index 0000000000..7c94e0d34f --- /dev/null +++ b/vendor/github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1/poolsynchronizerstatus.go @@ -0,0 +1,81 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/machineconfiguration/v1" +) + +// PoolSynchronizerStatusApplyConfiguration represents an declarative configuration of the PoolSynchronizerStatus type for use +// with apply. 
+type PoolSynchronizerStatusApplyConfiguration struct { + PoolSynchronizerType *v1.PoolSynchronizerType `json:"poolSynchronizerType,omitempty"` + MachineCount *int64 `json:"machineCount,omitempty"` + UpdatedMachineCount *int64 `json:"updatedMachineCount,omitempty"` + ReadyMachineCount *int64 `json:"readyMachineCount,omitempty"` + AvailableMachineCount *int64 `json:"availableMachineCount,omitempty"` + UnavailableMachineCount *int64 `json:"unavailableMachineCount,omitempty"` + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` +} + +// PoolSynchronizerStatusApplyConfiguration constructs an declarative configuration of the PoolSynchronizerStatus type for use with +// apply. +func PoolSynchronizerStatus() *PoolSynchronizerStatusApplyConfiguration { + return &PoolSynchronizerStatusApplyConfiguration{} +} + +// WithPoolSynchronizerType sets the PoolSynchronizerType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PoolSynchronizerType field is set to the value of the last call. +func (b *PoolSynchronizerStatusApplyConfiguration) WithPoolSynchronizerType(value v1.PoolSynchronizerType) *PoolSynchronizerStatusApplyConfiguration { + b.PoolSynchronizerType = &value + return b +} + +// WithMachineCount sets the MachineCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MachineCount field is set to the value of the last call. +func (b *PoolSynchronizerStatusApplyConfiguration) WithMachineCount(value int64) *PoolSynchronizerStatusApplyConfiguration { + b.MachineCount = &value + return b +} + +// WithUpdatedMachineCount sets the UpdatedMachineCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UpdatedMachineCount field is set to the value of the last call. +func (b *PoolSynchronizerStatusApplyConfiguration) WithUpdatedMachineCount(value int64) *PoolSynchronizerStatusApplyConfiguration { + b.UpdatedMachineCount = &value + return b +} + +// WithReadyMachineCount sets the ReadyMachineCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReadyMachineCount field is set to the value of the last call. +func (b *PoolSynchronizerStatusApplyConfiguration) WithReadyMachineCount(value int64) *PoolSynchronizerStatusApplyConfiguration { + b.ReadyMachineCount = &value + return b +} + +// WithAvailableMachineCount sets the AvailableMachineCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AvailableMachineCount field is set to the value of the last call. +func (b *PoolSynchronizerStatusApplyConfiguration) WithAvailableMachineCount(value int64) *PoolSynchronizerStatusApplyConfiguration { + b.AvailableMachineCount = &value + return b +} + +// WithUnavailableMachineCount sets the UnavailableMachineCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the UnavailableMachineCount field is set to the value of the last call. +func (b *PoolSynchronizerStatusApplyConfiguration) WithUnavailableMachineCount(value int64) *PoolSynchronizerStatusApplyConfiguration { + b.UnavailableMachineCount = &value + return b +} + +// WithObservedGeneration sets the ObservedGeneration field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ObservedGeneration field is set to the value of the last call. +func (b *PoolSynchronizerStatusApplyConfiguration) WithObservedGeneration(value int64) *PoolSynchronizerStatusApplyConfiguration { + b.ObservedGeneration = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go index 1403c9aad9..87bcc7629f 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go @@ -3599,6 +3599,15 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.VSphereCSIDriverConfigSpec map: fields: + - name: globalMaxSnapshotsPerBlockVolume + type: + scalar: numeric + - name: granularMaxSnapshotsPerBlockVolumeInVSAN + type: + scalar: numeric + - name: granularMaxSnapshotsPerBlockVolumeInVVOL + type: + scalar: numeric - name: topologyCategories type: list: diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go index 027cd9dbfe..aabfc83ded 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go @@ -5,7 +5,10 @@ package v1 // VSphereCSIDriverConfigSpecApplyConfiguration represents an declarative configuration of the VSphereCSIDriverConfigSpec type for use // with apply. type VSphereCSIDriverConfigSpecApplyConfiguration struct { - TopologyCategories []string `json:"topologyCategories,omitempty"` + TopologyCategories []string `json:"topologyCategories,omitempty"` + GlobalMaxSnapshotsPerBlockVolume *uint32 `json:"globalMaxSnapshotsPerBlockVolume,omitempty"` + GranularMaxSnapshotsPerBlockVolumeInVSAN *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVSAN,omitempty"` + GranularMaxSnapshotsPerBlockVolumeInVVOL *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVVOL,omitempty"` } // VSphereCSIDriverConfigSpecApplyConfiguration constructs an declarative configuration of the VSphereCSIDriverConfigSpec type for use with @@ -23,3 +26,27 @@ func (b *VSphereCSIDriverConfigSpecApplyConfiguration) WithTopologyCategories(va } return b } + +// WithGlobalMaxSnapshotsPerBlockVolume sets the GlobalMaxSnapshotsPerBlockVolume field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GlobalMaxSnapshotsPerBlockVolume field is set to the value of the last call. 
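
Taken together, the generated builders compose into fragments for server-side apply. A minimal sketch, assuming only the PoolSynchronizerStatus constructor and With* helpers added in this patch plus the conventional MachineConfigPoolStatus() constructor name from the same generator:

import (
	mcapplyv1 "github.com/openshift/client-go/machineconfiguration/applyconfigurations/machineconfiguration/v1"
)

// examplePoolStatus builds a status fragment declaratively. Repeated calls to
// WithPoolSynchronizersStatus append entries, and nil entries panic, per the
// generated implementation above.
func examplePoolStatus() *mcapplyv1.MachineConfigPoolStatusApplyConfiguration {
	return mcapplyv1.MachineConfigPoolStatus(). // constructor name assumed from the generator's convention
		WithPoolSynchronizersStatus(
			mcapplyv1.PoolSynchronizerStatus().
				WithMachineCount(3).
				WithUpdatedMachineCount(2).
				WithObservedGeneration(7),
		)
}
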
+func (b *VSphereCSIDriverConfigSpecApplyConfiguration) WithGlobalMaxSnapshotsPerBlockVolume(value uint32) *VSphereCSIDriverConfigSpecApplyConfiguration { + b.GlobalMaxSnapshotsPerBlockVolume = &value + return b +} + +// WithGranularMaxSnapshotsPerBlockVolumeInVSAN sets the GranularMaxSnapshotsPerBlockVolumeInVSAN field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GranularMaxSnapshotsPerBlockVolumeInVSAN field is set to the value of the last call. +func (b *VSphereCSIDriverConfigSpecApplyConfiguration) WithGranularMaxSnapshotsPerBlockVolumeInVSAN(value uint32) *VSphereCSIDriverConfigSpecApplyConfiguration { + b.GranularMaxSnapshotsPerBlockVolumeInVSAN = &value + return b +} + +// WithGranularMaxSnapshotsPerBlockVolumeInVVOL sets the GranularMaxSnapshotsPerBlockVolumeInVVOL field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GranularMaxSnapshotsPerBlockVolumeInVVOL field is set to the value of the last call. +func (b *VSphereCSIDriverConfigSpecApplyConfiguration) WithGranularMaxSnapshotsPerBlockVolumeInVVOL(value uint32) *VSphereCSIDriverConfigSpecApplyConfiguration { + b.GranularMaxSnapshotsPerBlockVolumeInVVOL = &value + return b +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f207bbe765..aa809e439f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -818,7 +818,7 @@ github.com/opencontainers/runc/libcontainer/user # github.com/opencontainers/runtime-spec v1.1.0 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/openshift/api v0.0.0-20240410141538-3c0461467316 +# github.com/openshift/api v0.0.0-20240416170644-3efee4252217 ## explicit; go 1.21 github.com/openshift/api github.com/openshift/api/annotations @@ -860,7 +860,7 @@ github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests github.com/openshift/api/machineconfiguration/v1alpha1 github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests github.com/openshift/api/monitoring -github.com/openshift/api/monitoring/v1alpha1 +github.com/openshift/api/monitoring/v1 github.com/openshift/api/network github.com/openshift/api/network/v1 github.com/openshift/api/network/v1alpha1 @@ -898,7 +898,7 @@ github.com/openshift/api/template github.com/openshift/api/template/v1 github.com/openshift/api/user github.com/openshift/api/user/v1 -# github.com/openshift/client-go v0.0.0-20240408153607-64bd6feb83ae +# github.com/openshift/client-go v0.0.0-20240415214935-be70f772f157 ## explicit; go 1.21 github.com/openshift/client-go/build/applyconfigurations/build/v1 github.com/openshift/client-go/build/applyconfigurations/internal From 7fb269f062b3907f55479c47b70303119f0728c4 Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Wed, 17 Apr 2024 11:09:28 -0400 Subject: [PATCH 09/18] install: sync crds Signed-off-by: Sam Batschelet --- ...controllerconfigs-CustomNoUpgrade.crd.yaml | 108 ++++++++++++++---- ...nfig_01_controllerconfigs-Default.crd.yaml | 90 ++++++++++++--- ...ollerconfigs-TechPreviewNoUpgrade.crd.yaml | 108 ++++++++++++++---- ...achineconfigpools-CustomNoUpgrade.crd.yaml | 76 ++++++++++++ ...econfigpools-TechPreviewNoUpgrade.crd.yaml | 76 ++++++++++++ 5 files changed, 398 insertions(+), 60 deletions(-) diff --git a/install/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml 
b/install/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml index 69bead6760..d55c788195 100644 --- a/install/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml +++ b/install/0000_80_machine-config_01_controllerconfigs-CustomNoUpgrade.crd.yaml @@ -377,15 +377,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -401,15 +406,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -419,8 +429,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -692,15 +706,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -716,15 +735,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -734,8 +758,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -838,15 +866,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' failureDomains: description: failureDomains contains the definition @@ -1000,15 +1033,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -1018,8 +1056,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1545,8 +1587,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1659,8 +1705,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1676,8 +1726,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1692,8 +1746,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -2142,8 +2200,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -2438,8 +2500,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set diff --git a/install/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml b/install/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml index 5c64eb7f2c..a328ea4ae4 100644 --- a/install/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml +++ b/install/0000_80_machine-config_01_controllerconfigs-Default.crd.yaml @@ -377,15 +377,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -401,15 +406,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -419,8 +429,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -692,15 +706,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -716,15 +735,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -734,8 +758,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -838,15 +866,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' failureDomains: description: failureDomains contains the definition @@ -1000,15 +1033,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -1018,8 +1056,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1545,8 +1587,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1906,8 +1952,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -2202,8 +2252,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set diff --git a/install/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml index 0203f30d1b..4be3b0e47f 100644 --- a/install/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml +++ b/install/0000_80_machine-config_01_controllerconfigs-TechPreviewNoUpgrade.crd.yaml @@ -377,15 +377,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -401,15 +406,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -419,8 +429,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -692,15 +706,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() : true' ingressIPs: description: ingressIPs are the external IPs which @@ -716,15 +735,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -734,8 +758,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -838,15 +866,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: apiServerInternalIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' failureDomains: description: failureDomains contains the definition @@ -1000,15 +1033,20 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 2 type: array x-kubernetes-list-type: set x-kubernetes-validations: - message: ingressIPs must contain at most one IPv4 address and at most one IPv6 address - rule: 'size(self) == 2 ? self.exists_one(x, x.contains('':'')) + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' machineNetworks: description: machineNetworks are IP networks used @@ -1018,8 +1056,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1545,8 +1587,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -1659,8 +1705,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1676,8 +1726,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -1692,8 +1746,12 @@ spec: items: description: IP is an IP address (for example, "10.0.0.0" or "fd00::"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*) + maxLength: 39 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) maxItems: 16 type: array x-kubernetes-list-type: set @@ -2142,8 +2200,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). 
- pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set @@ -2438,8 +2500,12 @@ spec: items: description: CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). - pattern: (^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/(3[0-2]|[1-2][0-9]|[0-9]))$)|(^s*((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]d|1dd|[1-9]?d)(.(25[0-5]|2[0-4]d|1dd|[1-9]?d)){3}))|:)))(%.+)?s*(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))$) + maxLength: 43 + minLength: 1 type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) maxItems: 32 type: array x-kubernetes-list-type: set diff --git a/install/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml index 7e4d18cb51..c4df02a787 100644 --- a/install/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml +++ b/install/0000_80_machine-config_01_machineconfigpools-CustomNoUpgrade.crd.yaml @@ -525,6 +525,82 @@ spec: by the controller. format: int64 type: integer + poolSynchronizersStatus: + description: poolSynchronizersStatus is the status of the machines + managed by the pool synchronizers. 
+ items: + properties: + availableMachineCount: + description: availableMachineCount is the number of machines + managed by the node synchronizer which are available. + format: int64 + minimum: 0 + type: integer + machineCount: + description: machineCount is the number of machines that are + managed by the node synchronizer. + format: int64 + minimum: 0 + type: integer + observedGeneration: + description: observedGeneration is the last generation change + that has been applied. + format: int64 + minimum: 0 + type: integer + x-kubernetes-validations: + - message: observedGeneration must not move backwards except + to zero + rule: self >= oldSelf || (self == 0 && oldSelf > 0) + poolSynchronizerType: + description: poolSynchronizerType describes the type of the + pool synchronizer. + enum: + - PinnedImageSets + maxLength: 256 + type: string + readyMachineCount: + description: readyMachineCount is the number of machines managed + by the node synchronizer that are in a ready state. + format: int64 + minimum: 0 + type: integer + unavailableMachineCount: + description: unavailableMachineCount is the number of machines + managed by the node synchronizer but are unavailable. + format: int64 + minimum: 0 + type: integer + updatedMachineCount: + description: updatedMachineCount is the number of machines that + have been updated by the node synchronizer. + format: int64 + minimum: 0 + type: integer + required: + - availableMachineCount + - machineCount + - poolSynchronizerType + - readyMachineCount + - unavailableMachineCount + - updatedMachineCount + type: object + x-kubernetes-validations: + - message: machineCount must be greater than or equal to updatedMachineCount + rule: self.machineCount >= self.updatedMachineCount + - message: machineCount must be greater than or equal to availableMachineCount + rule: self.machineCount >= self.availableMachineCount + - message: machineCount must be greater than or equal to unavailableMachineCount + rule: self.machineCount >= self.unavailableMachineCount + - message: machineCount must be greater than or equal to readyMachineCount + rule: self.machineCount >= self.readyMachineCount + - message: availableMachineCount must be greater than or equal to + readyMachineCount + rule: self.availableMachineCount >= self.readyMachineCount + type: array + x-kubernetes-list-map-keys: + - poolSynchronizerType + x-kubernetes-list-type: map readyMachineCount: description: readyMachineCount represents the total number of ready machines targeted by the pool. diff --git a/install/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml index d87cb5a06d..f04afccf18 100644 --- a/install/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml +++ b/install/0000_80_machine-config_01_machineconfigpools-TechPreviewNoUpgrade.crd.yaml @@ -525,6 +525,82 @@ spec: by the controller. format: int64 type: integer + poolSynchronizersStatus: + description: poolSynchronizersStatus is the status of the machines + managed by the pool synchronizers. + items: + properties: + availableMachineCount: + description: availableMachineCount is the number of machines + managed by the node synchronizer which are available. + format: int64 + minimum: 0 + type: integer + machineCount: + description: machineCount is the number of machines that are + managed by the node synchronizer. 
+ format: int64 + minimum: 0 + type: integer + observedGeneration: + description: observedGeneration is the last generation change + that has been applied. + format: int64 + minimum: 0 + type: integer + x-kubernetes-validations: + - message: observedGeneration must not move backwards except + to zero + rule: self >= oldSelf || (self == 0 && oldSelf > 0) + poolSynchronizerType: + description: poolSynchronizerType describes the type of the + pool synchronizer. + enum: + - PinnedImageSets + maxLength: 256 + type: string + readyMachineCount: + description: readyMachineCount is the number of machines managed + by the node synchronizer that are in a ready state. + format: int64 + minimum: 0 + type: integer + unavailableMachineCount: + description: unavailableMachineCount is the number of machines + managed by the node synchronizer but are unavailable. + format: int64 + minimum: 0 + type: integer + updatedMachineCount: + description: updatedMachineCount is the number of machines that + have been updated by the node synchronizer. + format: int64 + minimum: 0 + type: integer + required: + - availableMachineCount + - machineCount + - poolSynchronizerType + - readyMachineCount + - unavailableMachineCount + - updatedMachineCount + type: object + x-kubernetes-validations: + - message: machineCount must be greater than or equal to updatedMachineCount + rule: self.machineCount >= self.updatedMachineCount + - message: machineCount must be greater than or equal to availableMachineCount + rule: self.machineCount >= self.availableMachineCount + - message: machineCount must be greater than or equal to unavailableMachineCount + rule: self.machineCount >= self.unavailableMachineCount + - message: machineCount must be greater than or equal to readyMachineCount + rule: self.machineCount >= self.readyMachineCount + - message: availableMachineCount must be greater than or equal to + readyMachineCount + rule: self.availableMachineCount >= self.readyMachineCount + type: array + x-kubernetes-list-map-keys: + - poolSynchronizerType + x-kubernetes-list-type: map readyMachineCount: description: readyMachineCount represents the total number of ready machines targeted by the pool. 
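The recurring edit in the CRD hunks above replaces the old IP/CIDR regex patterns (whose IPv6 halves had lost their backslashes, e.g. "^s*" and "2[0-4]d") with CEL validations: per-item isIP()/isCIDR() checks plus a cross-item rule that a two-entry list must hold one address per IP family. Below is a minimal Go sketch of that dual-stack rule's semantics, using net/netip as a stand-in for CEL's ip().family(); the helper name and exact parsing behavior are illustrative assumptions, not part of the patch.

package main

import (
	"fmt"
	"net/netip"
)

// dualStackDistinct mirrors the CEL rule added in the hunks above:
//
//	size(self) == 2 && isIP(self[0]) && isIP(self[1])
//	    ? ip(self[0]).family() != ip(self[1]).family() : true
//
// A two-entry list is rejected only when both entries parse as IPs of the
// same family; anything else is left to the per-item isIP() validation and
// the maxItems bound.
func dualStackDistinct(ips []string) bool {
	if len(ips) != 2 {
		return true // the rule only constrains two-entry lists
	}
	a, errA := netip.ParseAddr(ips[0])
	b, errB := netip.ParseAddr(ips[1])
	if errA != nil || errB != nil {
		// A failed isIP() makes the CEL condition false, so the
		// ternary falls through to its "true" branch.
		return true
	}
	return a.Is4() != b.Is4() // family() != family()
}

func main() {
	fmt.Println(dualStackDistinct([]string{"10.0.0.1", "fd00::1"}))  // true: one IPv4 + one IPv6
	fmt.Println(dualStackDistinct([]string{"10.0.0.1", "10.0.0.2"})) // false: two IPv4 addresses
	fmt.Println(dualStackDistinct([]string{"fd00::1"}))              // true: single entries are unconstrained here
}

The new poolSynchronizersStatus block is validated in the same style: observedGeneration may only increase or reset to zero (self >= oldSelf || (self == 0 && oldSelf > 0)), and machineCount must bound the updated, available, unavailable, and ready counts, with availableMachineCount >= readyMachineCount.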
From eb6d2bfcb2951a28facd61e21c16a49919144445 Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Wed, 17 Apr 2024 11:17:23 -0400 Subject: [PATCH 10/18] mcc: update pinned image set controller to use correct degraded type Signed-off-by: Sam Batschelet --- .../pinnedimageset/pinned_image_set.go | 37 ++++++++----------- .../pinnedimageset/pinned_image_set_test.go | 4 +- 2 files changed, 17 insertions(+), 24 deletions(-) diff --git a/pkg/controller/pinnedimageset/pinned_image_set.go b/pkg/controller/pinnedimageset/pinned_image_set.go index 6e921db4df..36e5cf99bb 100644 --- a/pkg/controller/pinnedimageset/pinned_image_set.go +++ b/pkg/controller/pinnedimageset/pinned_image_set.go @@ -7,16 +7,6 @@ import ( "sort" "time" - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" - mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" - "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" - mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" - mcfginformersv1alpha1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" - mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" - mcfglistersv1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" - "github.com/openshift/machine-config-operator/pkg/apihelpers" - ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -29,6 +19,17 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + mcfgclientset "github.com/openshift/client-go/machineconfiguration/clientset/versioned" + "github.com/openshift/client-go/machineconfiguration/clientset/versioned/scheme" + mcfginformersv1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1" + mcfginformersv1alpha1 "github.com/openshift/client-go/machineconfiguration/informers/externalversions/machineconfiguration/v1alpha1" + mcfglistersv1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1" + mcfglistersv1alpha1 "github.com/openshift/client-go/machineconfiguration/listers/machineconfiguration/v1alpha1" + "github.com/openshift/machine-config-operator/pkg/apihelpers" + ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" ) const ( @@ -39,7 +40,7 @@ const ( // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s maxRetries = 15 - // delay is a pause to avoid churn in PinnedImageSets; see + // delay is a pause to avoid churn in PinnedImageSets delay = 5 * time.Second ) @@ -72,8 +73,7 @@ func New( eventBroadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) ctrl := &Controller{ - client: mcfgClient, - + client: mcfgClient, eventRecorder: ctrlcommon.NamespacedEventRecorder(eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "machineconfigcontroller-pinnedimagesetcontroller"})), queue: 
workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "machineconfigcontroller-pinnedimagesetcontroller"), } @@ -103,7 +103,6 @@ func New( // Run executes the render controller. func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { - klog.Info("Starting MachineConfigController-PinnedImageSetController") defer utilruntime.HandleCrash() defer ctrl.queue.ShutDown() @@ -123,7 +122,6 @@ func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { func (ctrl *Controller) addMachineConfigPool(obj interface{}) { pool := obj.(*mcfgv1.MachineConfigPool) - klog.V(4).Infof("Adding MachineConfigPool %s", pool.Name) ctrl.enqueueMachineConfigPool(pool) } @@ -227,10 +225,6 @@ func (ctrl *Controller) deletePinnedImageSet(obj interface{}) { } func (ctrl *Controller) getPoolsForPinnedImageSet(imageSet *mcfgv1alpha1.PinnedImageSet) ([]*mcfgv1.MachineConfigPool, error) { - if len(imageSet.Labels) == 0 { - return nil, fmt.Errorf("could not find any MachineConfigPool set for PinnedImageSet %s with labels: %v", imageSet.Name, imageSet.Labels) - } - pList, err := ctrl.mcpLister.List(labels.Everything()) if err != nil { return nil, err @@ -359,11 +353,10 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { } func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) error { - // TODO: update with the correct condition MachineConfigPoolPinnedImageSetsDegraded if apihelpers.IsMachineConfigPoolConditionFalse(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) { return nil } - sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionFalse, "", "") + sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolPinnedImageSetsDegraded, corev1.ConditionFalse, "", "") apihelpers.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) if _, err := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); err != nil { return err @@ -372,7 +365,7 @@ func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) erro } func (ctrl *Controller) syncFailingStatus(pool *mcfgv1.MachineConfigPool, err error) error { - sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolRenderDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to populate pinned image sets for pool %s: %v", pool.Name, err)) + sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolPinnedImageSetsDegraded, corev1.ConditionTrue, "", fmt.Sprintf("Failed to populate pinned image sets for pool %s: %v", pool.Name, err)) apihelpers.SetMachineConfigPoolCondition(&pool.Status, *sdegraded) if _, updateErr := ctrl.client.MachineconfigurationV1().MachineConfigPools().UpdateStatus(context.TODO(), pool, metav1.UpdateOptions{}); updateErr != nil { klog.Errorf("Error updating MachineConfigPool %s: %v", pool.Name, updateErr) diff --git a/pkg/controller/pinnedimageset/pinned_image_set_test.go b/pkg/controller/pinnedimageset/pinned_image_set_test.go index a6a9d117fd..eec5bf1ce0 100644 --- a/pkg/controller/pinnedimageset/pinned_image_set_test.go +++ b/pkg/controller/pinnedimageset/pinned_image_set_test.go @@ -155,9 +155,9 @@ func TestUpdateSpecNewPinnedImageSet(t *testing.T) { err := c.syncHandler(mcp.Name) require.NoError(err) - mcp, err = c.client.MachineconfigurationV1().MachineConfigPools().Get(ctx, mcp.Name, metav1.GetOptions{}) + mcpNew, err := 
c.client.MachineconfigurationV1().MachineConfigPools().Get(ctx, mcp.Name, metav1.GetOptions{}) require.NoError(err) - require.Equal(mcp.Spec.PinnedImageSets, tt.wantSpecPis) + require.Equal(mcpNew.Spec.PinnedImageSets, tt.wantSpecPis) }) } } From 8509ca9db088afe5148100e9aa04b3111e239e98 Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Wed, 17 Apr 2024 11:19:09 -0400 Subject: [PATCH 11/18] *: plumb in pinned image set degraded condition Signed-off-by: Sam Batschelet --- pkg/controller/node/status.go | 3 ++- pkg/operator/status.go | 3 +++ test/helpers/helpers.go | 7 +++++++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/pkg/controller/node/status.go b/pkg/controller/node/status.go index 95ca7863c7..7975317e42 100644 --- a/pkg/controller/node/status.go +++ b/pkg/controller/node/status.go @@ -235,7 +235,8 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr // but we might have a dedicated controller or control loop somewhere else that understands how to // set Degraded. For now, the node_controller understand NodeDegraded & RenderDegraded = Degraded. renderDegraded := apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) - if nodeDegraded || renderDegraded { + pinnedImageSetsDegraded := apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded) + if nodeDegraded || renderDegraded || pinnedImageSetsDegraded { sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolDegraded, corev1.ConditionTrue, "", "") apihelpers.SetMachineConfigPoolCondition(&status, *sdegraded) diff --git a/pkg/operator/status.go b/pkg/operator/status.go index 58c6318773..347939aea3 100644 --- a/pkg/operator/status.go +++ b/pkg/operator/status.go @@ -817,6 +817,9 @@ func machineConfigPoolStatus(pool *mcfgv1.MachineConfigPool) string { return fmt.Sprintf("all %d nodes are at latest configuration %s", pool.Status.MachineCount, pool.Status.Configuration.Name) case apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolUpdating): return fmt.Sprintf("%d (ready %d) out of %d nodes are updating to latest configuration %s", pool.Status.UpdatedMachineCount, pool.Status.ReadyMachineCount, pool.Status.MachineCount, pool.Spec.Configuration.Name) + case apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded): + cond := apihelpers.GetMachineConfigPoolCondition(pool.Status, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded) + return fmt.Sprintf("pool is degraded because pinned image sets failed with %q: %q", cond.Reason, cond.Message) default: return "" } diff --git a/test/helpers/helpers.go b/test/helpers/helpers.go index c6fefdb351..376c3dfc79 100644 --- a/test/helpers/helpers.go +++ b/test/helpers/helpers.go @@ -176,6 +176,13 @@ func NewMachineConfigPool(name string, mcSelector, nodeSelector *metav1.LabelSel Reason: "", Message: "", }, + { + Type: mcfgv1.MachineConfigPoolPinnedImageSetsDegraded, + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.Unix(0, 0), + Reason: "", + Message: "", + }, }, }, } From eaf6206dc2f81b75a9f78eca57d379ad85068a3a Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Thu, 18 Apr 2024 10:52:23 -0400 Subject: [PATCH 12/18] bootstrap-e2e: add remote-bucket flag Signed-off-by: Sam Batschelet --- test/framework/envtest.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git 
a/test/framework/envtest.go b/test/framework/envtest.go index 1fc548cc29..1e5bd857a8 100644 --- a/test/framework/envtest.go +++ b/test/framework/envtest.go @@ -27,10 +27,13 @@ import ( const ( OpenshiftConfigNamespace string = "openshift-config" - - // TODO: Figure out how to obtain this value programmatically so we don't - // have to remember to increment it. - k8sVersion string = "1.26.1" + // + // Although the MCO has been bumped to 1.29.2, openshift-kubebuilder-tools + // does not have an archive for that kube version yet. For now, hardcode to match it. + // More info here: + // https://github.com/openshift/api/pull/1774 + // https://github.com/openshift/api/blob/master/tools/publish-kubebuilder-tools/README.md#using-the-archives + k8sVersion string = "1.29.1" ) // This is needed because both setup-envtest and the kubebuilder tools assume @@ -82,7 +85,11 @@ func setupEnvTest(t *testing.T) (string, error) { os.Setenv("HOME", homeDir) } - cmd := exec.Command(setupEnvTestBinPath, "use", k8sVersion, "-p", "path") + // Use the remote-bucket flag to keep up with openshift/api's divergence + // More info: + // https://github.com/openshift/api/pull/1774, + // https://github.com/openshift/api/blob/master/tools/publish-kubebuilder-tools/README.md#using-the-archives + cmd := exec.Command(setupEnvTestBinPath, "use", k8sVersion, "-p", "path", "--remote-bucket", "openshift-kubebuilder-tools") t.Log("Setting up EnvTest: $", cmd) // We want to consume the path of where setup-envtest installed the From 97d6a4c7d9e2b4f6e3c2d47aae854509c787e547 Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Thu, 18 Apr 2024 10:53:14 -0400 Subject: [PATCH 13/18] pkg: cleanup controller and improve tests Signed-off-by: Sam Batschelet --- cmd/machine-config-controller/start.go | 32 +- cmd/machine-config-daemon/start.go | 1 + go.mod | 4 +- go.sum | 8 +- ...rollerconfigs-DevPreviewNoUpgrade.crd.yaml | 2759 +++++++++++++++++ ...neconfignodes-DevPreviewNoUpgrade.crd.yaml | 366 +++ ...neconfigpools-DevPreviewNoUpgrade.crd.yaml | 629 ++++ ...chineosbuilds-DevPreviewNoUpgrade.crd.yaml | 300 ++ ...hineosconfigs-DevPreviewNoUpgrade.crd.yaml | 352 +++ ...nnedimagesets-DevPreviewNoUpgrade.crd.yaml | 165 + pkg/controller/node/node_controller_test.go | 59 +- pkg/controller/node/status.go | 12 +- pkg/controller/node/status_test.go | 15 +- .../pinnedimageset/pinned_image_set.go | 6 +- .../pinnedimageset/pinned_image_set_test.go | 200 +- pkg/daemon/cri/cri.go | 2 + pkg/daemon/pinned_image_set.go | 655 ++-- pkg/daemon/pinned_image_set_test.go | 358 ++- pkg/operator/operator_test.go | 4 + pkg/operator/status.go | 35 +- pkg/operator/status_test.go | 31 +- pkg/operator/sync.go | 11 +- .../openshift/api/config/v1/feature_gates.go | 116 +- .../openshift/api/config/v1/types_feature.go | 10 +- ...usterversions-DevPreviewNoUpgrade.crd.yaml | 781 +++++ ...SelfManagedHA-DevPreviewNoUpgrade.crd.yaml | 553 ++++ ...0_config-operator_01_featuregates.crd.yaml | 3 + ...frastructures-DevPreviewNoUpgrade.crd.yaml | 2149 +++++++++++++ ...r_01_networks-DevPreviewNoUpgrade.crd.yaml | 433 +++ ...01_schedulers-DevPreviewNoUpgrade.crd.yaml | 130 + ...or_01_backups-DevPreviewNoUpgrade.crd.yaml | 142 + ...imagepolicies-DevPreviewNoUpgrade.crd.yaml | 398 +++ ...imagepolicies-DevPreviewNoUpgrade.crd.yaml | 398 +++ ...tsdatagathers-DevPreviewNoUpgrade.crd.yaml | 88 + vendor/github.com/openshift/api/features.md | 123 +- .../api/machineconfiguration/v1/types.go | 2 +- ...rollerconfigs-DevPreviewNoUpgrade.crd.yaml | 2759 +++++++++++++++++ 
...neconfigpools-DevPreviewNoUpgrade.crd.yaml | 629 ++++ ...neconfignodes-DevPreviewNoUpgrade.crd.yaml | 366 +++ ...chineosbuilds-DevPreviewNoUpgrade.crd.yaml | 300 ++ ...hineosconfigs-DevPreviewNoUpgrade.crd.yaml | 352 +++ ...nnedimagesets-DevPreviewNoUpgrade.crd.yaml | 165 + ...etcd_01_etcds-DevPreviewNoUpgrade.crd.yaml | 277 ++ ...onfigurations-DevPreviewNoUpgrade.crd.yaml | 1293 ++++++++ ...tercsidrivers-DevPreviewNoUpgrade.crd.yaml | 408 +++ ...1_etcdbackups-DevPreviewNoUpgrade.crd.yaml | 158 + ...nager_01_olms-DevPreviewNoUpgrade.crd.yaml | 179 ++ vendor/modules.txt | 4 +- 48 files changed, 17470 insertions(+), 750 deletions(-) create mode 100644 install/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml create mode 100644 install/0000_80_machine-config_01_machineconfignodes-DevPreviewNoUpgrade.crd.yaml create mode 100644 install/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml create mode 100644 install/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml create mode 100644 install/0000_80_machine-config_01_machineosconfigs-DevPreviewNoUpgrade.crd.yaml create mode 100644 install/0000_80_machine-config_01_pinnedimagesets-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_networks-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clusterimagepolicies-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_imagepolicies-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfignodes-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml create mode 100644 
vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_pinnedimagesets-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-DevPreviewNoUpgrade.crd.yaml create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-DevPreviewNoUpgrade.crd.yaml diff --git a/cmd/machine-config-controller/start.go b/cmd/machine-config-controller/start.go index 2a4a12a505..1460a36623 100644 --- a/cmd/machine-config-controller/start.go +++ b/cmd/machine-config-controller/start.go @@ -84,13 +84,6 @@ func runStartCmd(_ *cobra.Command, _ []string) { ctrlctx.FeatureGateAccess, ) - pinnedImageSet := pinnedimageset.New( - ctrlctx.InformerFactory.Machineconfiguration().V1alpha1().PinnedImageSets(), - ctrlctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), - ctrlctx.ClientBuilder.KubeClientOrDie("pinned-image-set-controller"), - ctrlctx.ClientBuilder.MachineConfigClientOrDie("pinned-image-set-controller"), - ) - // Start the shared factory informers that you need to use in your controller ctrlctx.InformerFactory.Start(ctrlctx.Stop) ctrlctx.KubeInformerFactory.Start(ctrlctx.Stop) @@ -112,6 +105,19 @@ func runStartCmd(_ *cobra.Command, _ []string) { enabled, disabled := getEnabledDisabledFeatures(features) klog.Infof("FeatureGates initialized: enabled=%v disabled=%v", enabled, disabled) + if features.Enabled(configv1.FeatureGatePinnedImages) { + pinnedImageSet := pinnedimageset.New( + ctrlctx.InformerFactory.Machineconfiguration().V1alpha1().PinnedImageSets(), + ctrlctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), + ctrlctx.ClientBuilder.KubeClientOrDie("pinned-image-set-controller"), + ctrlctx.ClientBuilder.MachineConfigClientOrDie("pinned-image-set-controller"), + ) + + go pinnedImageSet.Run(2, ctrlctx.Stop) + // start the informers again to enable feature gated types. + // see comments in SharedInformerFactory interface. 
+ ctrlctx.InformerFactory.Start(ctrlctx.Stop) + } case <-time.After(1 * time.Minute): klog.Errorf("timed out waiting for FeatureGate detection") os.Exit(1) @@ -122,17 +128,7 @@ func runStartCmd(_ *cobra.Command, _ []string) { } go draincontroller.Run(5, ctrlctx.Stop) - fg, err := ctrlctx.FeatureGateAccess.CurrentFeatureGates() - if err != nil { - klog.Fatalf("Could not get feature gate: %v", err) - } - if fg.Enabled(configv1.FeatureGatePinnedImages) { - go pinnedImageSet.Run(2, ctrlctx.Stop) - } else { - klog.Infof("FeatureGate %s is disabled, not starting the pinned image set controller", configv1.FeatureGatePinnedImages) - } - - // wait here in this function until the context gets cancelled (which tells us whe were being shut down) + // wait here in this function until the context gets cancelled (which tells us when we are being shut down) <-ctx.Done() } diff --git a/cmd/machine-config-daemon/start.go b/cmd/machine-config-daemon/start.go index 8ade1a152e..164a3d6276 100644 --- a/cmd/machine-config-daemon/start.go +++ b/cmd/machine-config-daemon/start.go @@ -230,6 +230,7 @@ func runStartCmd(_ *cobra.Command, _ []string) { resource.MustParse(constants.MinFreeStorageAfterPrefetch), constants.DefaultCRIOSocketPath, constants.KubeletAuthFile, + constants.ContainerRegistryConfPath, prefetchTimeout, ctrlctx.FeatureGateAccess, ) diff --git a/go.mod b/go.mod index af40e45036..3f2c77a5c2 100644 --- a/go.mod +++ b/go.mod @@ -27,8 +27,8 @@ require ( github.com/google/renameio v0.1.0 github.com/imdario/mergo v0.3.13 github.com/opencontainers/go-digest v1.0.0 - github.com/openshift/api v0.0.0-20240416170644-3efee4252217 - github.com/openshift/client-go v0.0.0-20240415214935-be70f772f157 + github.com/openshift/api v0.0.0-20240422085825-2624175e9673 + github.com/openshift/client-go v0.0.0-20240422164335-6c851f4919dd github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231213185242-e4dc676febfe github.com/openshift/library-go v0.0.0-20240412173449-eb2f24c36528 github.com/openshift/runtime-utils v0.0.0-20230921210328-7bdb5b9c177b diff --git a/go.sum b/go.sum index f000736d93..4747ab0ec1 100644 --- a/go.sum +++ b/go.sum @@ -689,10 +689,10 @@ github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQB github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M= github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/openshift/api v0.0.0-20240416170644-3efee4252217 h1:86Q6ukNk2zFmBGnVeSloph4U7yuPdcfNyu268xgrAUY= -github.com/openshift/api v0.0.0-20240416170644-3efee4252217/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= -github.com/openshift/client-go v0.0.0-20240415214935-be70f772f157 h1:xbd4qHpyFnnOYDHHnMQa+100MR+K/DFiC1J3BFdL+tQ= -github.com/openshift/client-go v0.0.0-20240415214935-be70f772f157/go.mod h1:Q3mt/X5xrxnR5R6BE7duF2ToLioRQJYnTYaaDS4QZTs= +github.com/openshift/api v0.0.0-20240422085825-2624175e9673 h1:D4qblu6z2A92fh7u9Nt1YskDtu+GySKiYP/D3tMWQ6A= +github.com/openshift/api v0.0.0-20240422085825-2624175e9673/go.mod h1:CxgbWAlvu2iQB0UmKTtRu1YfepRg1/vJ64n2DlIEVz4= +github.com/openshift/client-go v0.0.0-20240422164335-6c851f4919dd h1:z5TPsTaB8Zzvv9fK/kVB6X+FG1GtwM56WfoanhlbyyQ= +github.com/openshift/client-go v0.0.0-20240422164335-6c851f4919dd/go.mod h1:OC07uJXbaW/s21N6XDucROlmfUOhMXD5OrY3ZN3DmiM= github.com/openshift/cluster-config-operator 
v0.0.0-alpha.0.0.20231213185242-e4dc676febfe h1:wDQtyIbJJIoif2Ux0S+9MJWIWEGV0oG+iLm8WtqwdSw= github.com/openshift/cluster-config-operator v0.0.0-alpha.0.0.20231213185242-e4dc676febfe/go.mod h1:SGUtv1pKZSzSVr2YCxXFvhE+LbGfI+vcetEhNicKayw= github.com/openshift/kube-openapi v0.0.0-20230816122517-ffc8f001abb0 h1:GPlAy197Jkr+D0T2FNWanamraTdzS/r9ZkT29lxvHaA= diff --git a/install/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..a62c75eb2e --- /dev/null +++ b/install/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,2759 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: controllerconfigs.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: ControllerConfig + listKind: ControllerConfigList + plural: controllerconfigs + singular: controllerconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ControllerConfig describes configuration for MachineConfigController. + This is currently only used to drive the MachineConfig objects generated + by the TemplateController. \n Compatibility level 1: Stable within a major + release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ControllerConfigSpec is the spec for ControllerConfig resource. + properties: + additionalTrustBundle: + description: additionalTrustBundle is a certificate bundle that will + be added to the nodes trusted certificate store. + format: byte + nullable: true + type: string + baseOSContainerImage: + description: BaseOSContainerImage is the new-format container image + for operating system updates. 
+ type: string + baseOSExtensionsContainerImage: + description: BaseOSExtensionsContainerImage is the matching extensions + container for the new-format container + type: string + cloudProviderCAData: + description: cloudProvider specifies the cloud provider CA data + format: byte + nullable: true + type: string + cloudProviderConfig: + description: cloudProviderConfig is the configuration for the given + cloud provider + type: string + clusterDNSIP: + description: clusterDNSIP is the cluster DNS IP address + type: string + dns: + description: dns holds the cluster dns details + nullable: true + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'metadata is the standard object''s metadata. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: spec holds user settable values for configuration + properties: + baseDomain: + description: "baseDomain is the base domain of the cluster. + All managed DNS records will be sub-domains of this base. + \n For example, given the base domain `openshift.example.com`, + an API server DNS record may be created for `cluster-api.openshift.example.com`. + \n Once set, this field cannot be changed." + type: string + platform: + description: platform holds configuration specific to the + underlying infrastructure provider for DNS. When omitted, + this means the user has no opinion and the platform is left + to choose reasonable defaults. These defaults are subject + to change over time. + properties: + aws: + description: aws contains DNS configuration specific to + the Amazon Web Services cloud provider. + properties: + privateZoneIAMRole: + description: privateZoneIAMRole contains the ARN of + an IAM role that should be assumed when performing + operations on the cluster's private hosted zone + specified in the cluster DNS config. When left empty, + no role should be assumed. + pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ + type: string + type: object + type: + description: "type is the underlying infrastructure provider + for the cluster. Allowed values: \"\", \"AWS\". \n Individual + components may not support all platforms, and must handle + unrecognized platforms with best-effort defaults." + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + x-kubernetes-validations: + - message: allowed values are '' and 'AWS' + rule: self in ['','AWS'] + required: + - type + type: object + x-kubernetes-validations: + - message: aws configuration is required when platform is + AWS, and forbidden otherwise + rule: 'has(self.type) && self.type == ''AWS'' ? 
has(self.aws) + : !has(self.aws)' + privateZone: + description: "privateZone is the location where all the DNS + records that are only available internally to the cluster + exist. \n If this field is nil, no private records should + be created. \n Once set, this field cannot be changed." + properties: + id: + description: "id is the identifier that can be used to + find the DNS hosted zone. \n on AWS zone can be fetched + using `ID` as id in [1] on Azure zone can be fetched + using `ID` as a pre-determined name in [2], on GCP zone + can be fetched using `ID` as a pre-determined name in + [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + additionalProperties: + type: string + description: "tags can be used to query the DNS hosted + zone. \n on AWS, resourcegroupstaggingapi [1] can be + used to fetch a zone using `Tags` as tag-filters, \n + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + type: object + publicZone: + description: "publicZone is the location where all the DNS + records that are publicly accessible to the internet exist. + \n If this field is nil, no public records should be created. + \n Once set, this field cannot be changed." + properties: + id: + description: "id is the identifier that can be used to + find the DNS hosted zone. \n on AWS zone can be fetched + using `ID` as id in [1] on Azure zone can be fetched + using `ID` as a pre-determined name in [2], on GCP zone + can be fetched using `ID` as a pre-determined name in + [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + additionalProperties: + type: string + description: "tags can be used to query the DNS hosted + zone. \n on AWS, resourcegroupstaggingapi [1] can be + used to fetch a zone using `Tags` as tag-filters, \n + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + type: object + type: object + status: + description: status holds observed values from the cluster. They + may not be overridden. 
+ type: object + required: + - spec + type: object + x-kubernetes-embedded-resource: true + etcdDiscoveryDomain: + description: etcdDiscoveryDomain is deprecated, use Infra.Status.EtcdDiscoveryDomain + instead + type: string + imageRegistryBundleData: + description: imageRegistryBundleData is the ImageRegistryData + items: + description: ImageRegistryBundle contains information for writing + image registry certificates + properties: + data: + description: data holds the contents of the bundle that will + be written to the file location + format: byte + type: string + file: + description: file holds the name of the file where the bundle + will be written to disk + type: string + required: + - data + - file + type: object + type: array + x-kubernetes-list-type: atomic + imageRegistryBundleUserData: + description: imageRegistryBundleUserData is Image Registry Data provided + by the user + items: + description: ImageRegistryBundle contains information for writing + image registry certificates + properties: + data: + description: data holds the contents of the bundle that will + be written to the file location + format: byte + type: string + file: + description: file holds the name of the file where the bundle + will be written to disk + type: string + required: + - data + - file + type: object + type: array + x-kubernetes-list-type: atomic + images: + additionalProperties: + type: string + description: images is map of images that are used by the controller + to render templates under ./templates/ + type: object + infra: + description: infra holds the infrastructure details + nullable: true + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'metadata is the standard object''s metadata. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: spec holds user settable values for configuration + properties: + cloudConfig: + description: "cloudConfig is a reference to a ConfigMap containing + the cloud provider configuration file. This configuration + file is used to configure the Kubernetes cloud provider + integration when using the built-in cloud provider integration + or the external cloud controller manager. The namespace + for this config map is openshift-config. \n cloudConfig + should only be consumed by the kube_cloud_config controller. + The controller is responsible for using the user configuration + in the spec for various platforms and combining that with + the user provided ConfigMap in this field to create a stitched + kube cloud config. The controller generates a ConfigMap + `kube-cloud-config` in `openshift-config-managed` namespace + with the kube cloud config is stored in `cloud.conf` key. + All the clients are expected to use the generated ConfigMap + only." 
+ properties: + key: + description: Key allows pointing to a specific key/value + inside of the configmap. This is useful for logical + file references. + type: string + name: + type: string + type: object + platformSpec: + description: platformSpec holds desired information specific + to the underlying infrastructure provider. + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to + the Alibaba Cloud infrastructure provider. + type: object + aws: + description: AWS contains settings specific to the Amazon + Web Services infrastructure provider. + properties: + serviceEndpoints: + description: serviceEndpoints list contains custom + endpoints which will override default service endpoint + of AWS Services. There must be only one ServiceEndpoint + for a service. + items: + description: AWSServiceEndpoint store the configuration + of a custom url to override existing defaults + of AWS Services. + properties: + name: + description: name is the name of the AWS service. + The list of all the service names can be found + at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html + This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with + scheme https, that overrides the default generated + endpoint for a client. This must be provided + and cannot be empty. + pattern: ^https:// + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + azure: + description: Azure contains settings specific to the Azure + infrastructure provider. + type: object + baremetal: + description: BareMetal contains settings specific to the + BareMetal platform. + properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IP addresses, + one from IPv4 family and one from IPv6. In single + stack clusters a single IP address is expected. + When omitted, values from the status.apiServerInternalIPs + will be used. Once set, the list cannot be completely + removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must contain at most + one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() + : true' + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IP addresses, one + from IPv4 family and one from IPv6. In single stack + clusters a single IP address is expected. When omitted, + values from the status.ingressIPs will be used. + Once set, the list cannot be completely removed + (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). 
+ maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 + address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() + : true' + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. Each + network is provided in the CIDR format and should + be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once + set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + equinixMetal: + description: EquinixMetal contains settings specific to + the Equinix Metal infrastructure provider. + type: object + external: + description: ExternalPlatformType represents generic infrastructure + provider. Platform-specific components should be supplemented + separately. + properties: + platformName: + default: Unknown + description: PlatformName holds the arbitrary string + representing the infrastructure provider name, expected + to be set at the installation time. This field is + solely for informational and reporting purposes + and is not expected to be used for decision-making. + type: string + x-kubernetes-validations: + - message: platform name cannot be changed once set + rule: oldSelf == 'Unknown' || self == oldSelf + type: object + gcp: + description: GCP contains settings specific to the Google + Cloud Platform infrastructure provider. + type: object + ibmcloud: + description: IBMCloud contains settings specific to the + IBMCloud infrastructure provider. + type: object + kubevirt: + description: Kubevirt contains settings specific to the + kubevirt infrastructure provider. + type: object + nutanix: + description: Nutanix contains settings specific to the + Nutanix infrastructure provider. + properties: + failureDomains: + description: failureDomains configures failure domains + information for the Nutanix platform. When set, + the failure domains defined here may be used to + spread Machines across prism element clusters to + improve fault tolerance of the cluster. + items: + description: NutanixFailureDomain configures failure + domain information for the Nutanix platform. + properties: + cluster: + description: cluster is to identify the cluster + (the Prism Element under management of the + Prism Central), in which the Machine's VM + will be created. The cluster identifier (uuid + or name) can be obtained from the Prism Central + console or using the prism_central API. + properties: + name: + description: name is the resource name in + the PC. It cannot be empty if the type + is Name. + type: string + type: + description: type is the identifier type + to use for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource + in the PC. 
It cannot be empty if the type + is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when + type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' + ? has(self.uuid) : !has(self.uuid)' + - message: name configuration is required when + type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' + ? has(self.name) : !has(self.name)' + name: + description: name defines the unique name of + a failure domain. Name is required and must + be at most 64 characters in length. It must + consist of only lower case alphanumeric characters + and hyphens (-). It must start and end with + an alphanumeric character. This value is arbitrary + and is used to identify the failure domain + within the platform. + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + subnets: + description: subnets holds a list of identifiers + (one or more) of the cluster's network subnets + for the Machine's VM to connect to. The subnet + identifiers (uuid or name) can be obtained + from the Prism Central console or using the + prism_central API. + items: + description: NutanixResourceIdentifier holds + the identity of a Nutanix PC resource (cluster, + image, subnet, etc.) + properties: + name: + description: name is the resource name + in the PC. It cannot be empty if the + type is Name. + type: string + type: + description: type is the identifier type + to use for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource + in the PC. It cannot be empty if the + type is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required + when type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' + ? has(self.uuid) : !has(self.uuid)' + - message: name configuration is required + when type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' + ? has(self.name) : !has(self.name)' + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - cluster + - name + - subnets + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + prismCentral: + description: prismCentral holds the endpoint address + and port to access the Nutanix Prism Central. When + a cluster-wide proxy is installed, by default, this + endpoint will be accessed via the proxy. Should + you wish for communication with this endpoint not + to be proxied, please add the endpoint to the proxy + spec.noProxy list. + properties: + address: + description: address is the endpoint address (DNS + name or IP address) of the Nutanix Prism Central + or Element (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to access + the Nutanix Prism Central or Element (cluster) + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + prismElements: + description: prismElements holds one or more endpoint + address and port data to access the Nutanix Prism + Elements (clusters) of the Nutanix Prism Central. + Currently we only support one Prism Element (cluster) + for an OpenShift cluster, where all the Nutanix + resources (VMs, subnets, volumes, etc.) used in + the OpenShift cluster are located. 
In the future, + we may support Nutanix resources (VMs, etc.) spread + over multiple Prism Elements (clusters) of the Prism + Central. + items: + description: NutanixPrismElementEndpoint holds the + name and endpoint data for a Prism Element (cluster) + properties: + endpoint: + description: endpoint holds the endpoint address + and port data of the Prism Element (cluster). + When a cluster-wide proxy is installed, by + default, this endpoint will be accessed via + the proxy. Should you wish for communication + with this endpoint not to be proxied, please + add the endpoint to the proxy spec.noProxy + list. + properties: + address: + description: address is the endpoint address + (DNS name or IP address) of the Nutanix + Prism Central or Element (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to + access the Nutanix Prism Central or Element + (cluster) + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + name: + description: name is the name of the Prism Element + (cluster). This value will correspond with + the cluster field configured on other resources + (eg Machines, PVCs, etc). + maxLength: 256 + type: string + required: + - endpoint + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - prismCentral + - prismElements + type: object + openstack: + description: OpenStack contains settings specific to the + OpenStack infrastructure provider. + properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IP addresses, + one from IPv4 family and one from IPv6. In single + stack clusters a single IP address is expected. + When omitted, values from the status.apiServerInternalIPs + will be used. Once set, the list cannot be completely + removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must contain at most + one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() + : true' + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IP addresses, one + from IPv4 family and one from IPv6. In single stack + clusters a single IP address is expected. When omitted, + values from the status.ingressIPs will be used. + Once set, the list cannot be completely removed + (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). 
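+ # Illustrative sketch (hypothetical names, addresses and UUIDs; not part of
+ # the generated schema): a minimal Nutanix platformSpec combining the
+ # prismCentral, prismElements and failureDomains stanzas defined above:
+ #
+ #   platformSpec:
+ #     type: Nutanix
+ #     nutanix:
+ #       prismCentral:
+ #         address: pc.example.com
+ #         port: 9440
+ #       prismElements:
+ #         - name: pe-cluster-1
+ #           endpoint:
+ #             address: pe1.example.com
+ #             port: 9440
+ #       failureDomains:
+ #         - name: fd-1
+ #           cluster:
+ #             type: Name
+ #             name: pe-cluster-1
+ #           subnets:
+ #             - type: UUID
+ #               uuid: 0005a0f1-8f43-a0f2-02b7-3cecef193712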
+ maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 + address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() + : true' + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. Each + network is provided in the CIDR format and should + be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once + set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + ovirt: + description: Ovirt contains settings specific to the oVirt + infrastructure provider. + type: object + powervs: + description: PowerVS contains settings specific to the + IBM Power Systems Virtual Servers infrastructure provider. + properties: + serviceEndpoints: + description: serviceEndpoints is a list of custom + endpoints which will override the default service + endpoints of a Power VS service. + items: + description: PowervsServiceEndpoint stores the configuration + of a custom url to override existing defaults + of PowerVS Services. + properties: + name: + description: name is the name of the Power VS + service. Some of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api + ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller + Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with + scheme https, that overrides the default generated + endpoint for a client. This must be provided + and cannot be empty. + format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: type is the underlying infrastructure provider + for the cluster. This value controls whether infrastructure + automation such as service load balancers, dynamic volume + provisioning, machine creation and deletion, and other + integrations are enabled. If None, no infrastructure + automation is enabled. Allowed values are "AWS", "Azure", + "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", + "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", + "Nutanix" and "None". Individual components may not + support all platforms, and must handle unrecognized + platforms as None if they do not support that platform. + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + vsphere: + description: VSphere contains settings specific to the + VSphere infrastructure provider.
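+ # Illustrative sketch (hypothetical service name and URL; not part of the
+ # generated schema): overriding a Power VS service endpoint as permitted by
+ # the powervs stanza above:
+ #
+ #   platformSpec:
+ #     type: PowerVS
+ #     powervs:
+ #       serviceEndpoints:
+ #         - name: iam
+ #           url: https://private.iam.cloud.ibm.com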
+ properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IP addresses, + one from IPv4 family and one from IPv6. In single + stack clusters a single IP address is expected. + When omitted, values from the status.apiServerInternalIPs + will be used. Once set, the list cannot be completely + removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must contain at most + one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() + : true' + failureDomains: + description: failureDomains contains the definition + of region, zone and the vCenter topology. If this + is omitted, failure domains (regions and zones) will + not be used. + items: + description: VSpherePlatformFailureDomainSpec holds + the region and zone failure domain and the vCenter + topology of that failure domain. + properties: + name: + description: name defines the arbitrary but + unique name of a failure domain. + maxLength: 256 + minLength: 1 + type: string + region: + description: region defines the name of a region + tag that will be attached to a vCenter datacenter. + The tag category in vCenter must be named + openshift-region. + maxLength: 80 + minLength: 1 + type: string + server: + description: server is the fully-qualified domain + name or the IP address of the vCenter server. + --- + maxLength: 255 + minLength: 1 + type: string + topology: + description: Topology describes a given failure + domain using vSphere constructs + properties: + computeCluster: + description: computeCluster is the absolute + path of the vCenter cluster in which virtual + machine will be located. The absolute + path is of the form /<datacenter>/host/<cluster>. + The maximum length of the path is 2048 + characters. + maxLength: 2048 + pattern: ^/.*?/host/.*? + type: string + datacenter: + description: datacenter is the name of the vCenter + datacenter in which virtual machines will + be located. The maximum length of the + datacenter name is 80 characters. + maxLength: 80 + type: string + datastore: + description: datastore is the absolute path + of the datastore in which the virtual + machine is located. The absolute path + is of the form /<datacenter>/datastore/<datastore>. + The maximum length of the path is 2048 + characters. + maxLength: 2048 + pattern: ^/.*?/datastore/.*? + type: string + folder: + description: folder is the absolute path + of the folder where virtual machines are + located. The absolute path is of the form + /<datacenter>/vm/<folder>. The maximum + length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/vm/.*? + type: string + networks: + description: networks is the list of port + group network names within this failure + domain. Currently, we only support a single + interface per RHCOS virtual machine.
The + available networks (port groups) can be + listed using `govc ls 'network/*'`. The + single interface should be the absolute + path of the form /<datacenter>/network/<portgroup>. + items: + type: string + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + resourcePool: + description: resourcePool is the absolute + path of the resource pool where virtual + machines will be created. The absolute + path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>. + The maximum length of the path is 2048 + characters. + maxLength: 2048 + pattern: ^/.*?/host/.*?/Resources.* + type: string + template: + description: "template is the full inventory + path of the virtual machine or template + that will be cloned when creating new + machines in this failure domain. The maximum + length of the path is 2048 characters. + \n When omitted, the template will be + calculated by the control plane machineset + operator based on the region and zone + defined in VSpherePlatformFailureDomainSpec. + For example, for zone=zonea, region=region1, + and infrastructure name=test, the template + path would be calculated as /<datacenter>/vm/test-rhcos-region1-zonea." + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zone: + description: zone defines the name of a zone + tag that will be attached to a vCenter cluster. + The tag category in vCenter must be named + openshift-zone. + maxLength: 80 + minLength: 1 + type: string + required: + - name + - region + - server + - topology + - zone + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IP addresses, one + from IPv4 family and one from IPv6. In single stack + clusters a single IP address is expected. When omitted, + values from the status.ingressIPs will be used. + Once set, the list cannot be completely removed + (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 + address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() + : true' + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. Each + network is provided in the CIDR format and should + be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeNetworking: + description: nodeNetworking contains the definition + of internal and external network constraints for + assigning the node's networking.
If this field is + omitted, networking defaults to the legacy address + selection behavior which is to only support a single + address and return the first one found. + properties: + external: + description: external represents the network configuration + of the node that is externally routable. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's + VM for use in the status.addresses fields. + --- + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: network VirtualMachine's VM Network + names that will be used when searching + for status.addresses fields. Note that if + internal.networkSubnetCIDR and external.networkSubnetCIDR + are not set, then the vNIC associated to + this network must only have a single IP + address assigned to it. The available networks + (port groups) can be listed using `govc + ls 'network/*'`. + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address + on VirtualMachine's network interfaces included + in the fields' CIDRs that will be used in + respective status.addresses fields. --- + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + internal: + description: internal represents the network configuration + of the node that is routable only within the + cluster. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's + VM for use in the status.addresses fields. + --- + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: network VirtualMachine's VM Network + names that will be used when searching + for status.addresses fields. Note that if + internal.networkSubnetCIDR and external.networkSubnetCIDR + are not set, then the vNIC associated to + this network must only have a single IP + address assigned to it. The available networks + (port groups) can be listed using `govc + ls 'network/*'`. + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address + on VirtualMachine's network interfaces included + in the fields' CIDRs that will be used in + respective status.addresses fields. --- + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + vcenters: + description: vcenters holds the connection details + for services to communicate with vCenter. Currently, + only a single vCenter is supported. --- + items: + description: VSpherePlatformVCenterSpec stores the + vCenter connection fields. This is used by the + vSphere CCM. + properties: + datacenters: + description: The vCenter Datacenters in which + the RHCOS vm guests are located. This field + will be used by the Cloud Controller Manager. + Each datacenter listed here should be used + within a topology. + items: + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + port: + description: port is the TCP port that will + be used to communicate to the vCenter endpoint. + When omitted, this means the user has no opinion + and it is up to the platform to choose a sensible + default, which is subject to change over time. + format: int32 + maximum: 32767 + minimum: 1 + type: integer + server: + description: server is the fully-qualified domain + name or the IP address of the vCenter server.
+ --- + maxLength: 255 + type: string + required: + - datacenters + - server + type: object + maxItems: 1 + minItems: 0 + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once + set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + type: object + type: object + status: + description: status holds observed values from the cluster. They + may not be overridden. + properties: + apiServerInternalURI: + description: apiServerInternalURI is a valid URI with scheme + 'https', address and optionally a port (defaulting to 443). apiServerInternalURI + can be used by components like kubelets to contact the + Kubernetes API server using the infrastructure provider + rather than Kubernetes networking. + type: string + apiServerURL: + description: apiServerURL is a valid URI with scheme 'https', + address and optionally a port (defaulting to 443). apiServerURL + can be used by components like the web console to tell users + where to find the Kubernetes API. + type: string + controlPlaneTopology: + default: HighlyAvailable + description: controlPlaneTopology expresses the expectations + for operands that normally run on control nodes. The default + is 'HighlyAvailable', which represents the behavior operators + have in a "normal" cluster. The 'SingleReplica' mode will + be used in single-node deployments and the operators should + not configure the operand for highly-available operation. + The 'External' mode indicates that the control plane is + hosted externally to the cluster and that its components + are not visible within the cluster. + enum: + - HighlyAvailable + - SingleReplica + - External + type: string + cpuPartitioning: + default: None + description: cpuPartitioning expresses if CPU partitioning + is a currently enabled feature in the cluster. CPU Partitioning + means that this cluster can support partitioning workloads + to specific CPU Sets. Valid values are "None" and "AllNodes". + When omitted, the default value is "None". The default value + of "None" indicates that no nodes will be set up with CPU + partitioning. The "AllNodes" value indicates that all nodes + have been set up with CPU partitioning, and can then be further + configured via the PerformanceProfile API. + enum: + - None + - AllNodes + type: string + etcdDiscoveryDomain: + description: 'etcdDiscoveryDomain is the domain used to fetch + the SRV records for discovering etcd servers and clients. + For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery + deprecated: as of 4.7, this field is no longer set or honored. It + will be removed in a future release.' + type: string + infrastructureName: + description: infrastructureName uniquely identifies a cluster + with a human friendly name. Once set it should not be changed. + Must be of max length 27 and must have only alphanumeric + or hyphen characters. + type: string + infrastructureTopology: + default: HighlyAvailable + description: 'infrastructureTopology expresses the expectations + for infrastructure services that do not run on control plane + nodes, usually indicated by a node selector for a `role` + value other than `master`. The default is ''HighlyAvailable'', + which represents the behavior operators have in a "normal" + cluster.
The ''SingleReplica'' mode will be used in single-node + deployments and the operators should not configure the operand + for highly-available operation. NOTE: External topology mode + is not applicable for this field.' + enum: + - HighlyAvailable + - SingleReplica + type: string + platform: + description: "platform is the underlying infrastructure provider + for the cluster. \n Deprecated: Use platformStatus.type + instead." + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + platformStatus: + description: platformStatus holds status information specific + to the underlying infrastructure provider. + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to + the Alibaba Cloud infrastructure provider. + properties: + region: + description: region specifies the region for Alibaba + Cloud resources created for the cluster. + pattern: ^[0-9A-Za-z-]+$ + type: string + resourceGroupID: + description: resourceGroupID is the ID of the resource + group for the cluster. + pattern: ^(rg-[0-9A-Za-z]+)?$ + type: string + resourceTags: + description: resourceTags is a list of additional + tags to apply to Alibaba Cloud resources created + for the cluster. + items: + description: AlibabaCloudResourceTag is the set + of tags to apply to resources. + properties: + key: + description: key is the key of the tag. + maxLength: 128 + minLength: 1 + type: string + value: + description: value is the value of the tag. + maxLength: 128 + minLength: 1 + type: string + required: + - key + - value + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + required: + - region + type: object + aws: + description: AWS contains settings specific to the Amazon + Web Services infrastructure provider. + properties: + region: + description: region holds the default AWS region for + new AWS resources created by the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional + tags to apply to AWS resources created for the cluster. + See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html + for information on tagging AWS resources. AWS supports + a maximum of 50 tags per resource. OpenShift reserves + 25 tags for its use, leaving 25 tags available for + the user. + items: + description: AWSResourceTag is a tag to apply to + AWS resources created for the cluster. + properties: + key: + description: key is the key of the tag + maxLength: 128 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + value: + description: value is the value of the tag. + Some AWS services do not support empty values. + Since tags are added to resources in many + services, the length of the tag value must + meet the requirements of all services. + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 25 + type: array + x-kubernetes-list-type: atomic + serviceEndpoints: + description: ServiceEndpoints list contains custom + endpoints which will override the default service endpoints + of AWS services. There must be only one ServiceEndpoint + for a service. + items: + description: AWSServiceEndpoint stores the configuration + of a custom url to override existing defaults + of AWS Services. + properties: + name: + description: name is the name of the AWS service.
+ The list of all the service names can be found + at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html + This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with + scheme https, that overrides the default generated + endpoint for a client. This must be provided + and cannot be empty. + pattern: ^https:// + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + azure: + description: Azure contains settings specific to the Azure + infrastructure provider. + properties: + armEndpoint: + description: armEndpoint specifies a URL to use for + resource management in non-sovereign clouds such + as Azure Stack. + type: string + cloudName: + description: cloudName is the name of the Azure cloud + environment which can be used to configure the Azure + SDK with the appropriate Azure API endpoints. If + empty, the value is equal to `AzurePublicCloud`. + enum: + - "" + - AzurePublicCloud + - AzureUSGovernmentCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureStackCloud + type: string + networkResourceGroupName: + description: networkResourceGroupName is the Resource + Group for network resources like the Virtual Network + and Subnets used by the cluster. If empty, the value + is the same as ResourceGroupName. + type: string + resourceGroupName: + description: resourceGroupName is the Resource Group + for new Azure resources created for the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional + tags to apply to Azure resources created for the + cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags + for information on tagging Azure resources. Due + to limitations on Automation, Content Delivery Network, + and DNS Azure resources, a maximum of 15 tags may be + applied. OpenShift reserves 5 tags for internal + use, allowing 10 tags for user configuration. + items: + description: AzureResourceTag is a tag to apply + to Azure resources created for the cluster. + properties: + key: + description: key is the key part of the tag. + A tag key can have a maximum of 128 characters + and cannot be empty. Key must begin with a + letter, end with a letter, number or underscore, + and must contain only alphanumeric characters + and the following special characters `_ . + -`. + maxLength: 128 + minLength: 1 + pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + value: + description: 'value is the value part of the + tag. A tag value can have a maximum of 256 + characters and cannot be empty. Value must + contain only alphanumeric characters and the + following special characters `_ + , - . / + : ; < = > ? @`.' + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: resourceTags are immutable and may only + be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, + x in self) + type: object + x-kubernetes-validations: + - message: resourceTags may only be configured during + installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) + || has(oldSelf.resourceTags) && has(self.resourceTags)' + baremetal: + description: BareMetal contains settings specific to the + BareMetal platform.
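+ # Illustrative sketch (hypothetical values; not part of the generated
+ # schema): azure platformStatus resourceTags as constrained above. The CEL
+ # rules make the list and its entries effectively install-time only:
+ #
+ #   platformStatus:
+ #     type: Azure
+ #     azure:
+ #       resourceGroupName: my-cluster-rg
+ #       cloudName: AzurePublicCloud
+ #       resourceTags:
+ #         - key: owner
+ #           value: team-a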
+ properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. \n Deprecated: Use + APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. \n Deprecated: Use IngressIPs + instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IPs otherwise only + one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer + used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer + used by the cluster on BareMetal platform which + can be a user-managed or openshift-managed load + balancer that is to be used for the OpenShift + API and Ingress endpoints. When set to OpenShiftManagedDefault + the static pods in charge of API and Ingress + traffic load-balancing defined in the machine + config operator will be deployed. When set to + UserManaged these static pods will not be deployed + and it is expected that the load balancer is + configured out of band by the deployer. When + omitted, this means no opinion and the platform + is left to choose a reasonable default. The + default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. + items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. Unlike the one managed by + the DNS operator, `NodeDNSIP` provides name resolution + for the nodes themselves. There is no DNS-as-a-service + for BareMetal deployments. 
In order to minimize + necessary changes to the datacenter DNS, a DNS service + is hosted as a static pod to serve those hostnames + to the nodes in the cluster. + type: string + type: object + equinixMetal: + description: EquinixMetal contains settings specific to + the Equinix Metal infrastructure provider. + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. + type: string + type: object + external: + description: External contains settings specific to the + generic External infrastructure provider. + properties: + cloudControllerManager: + description: cloudControllerManager contains settings + specific to the external Cloud Controller Manager + (a.k.a. CCM or CPI). When omitted, new nodes will + not be tainted and no extra initialization from + the cloud controller manager is expected. + properties: + state: + description: "state determines whether or not + an external Cloud Controller Manager is expected + to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager + \n Valid values are \"External\", \"None\" and + omitted. When set to \"External\", new nodes + will be tainted as uninitialized when created, + preventing them from running workloads until + they are initialized by the cloud controller + manager. When omitted or set to \"None\", new + nodes will not be tainted and no extra initialization + from the cloud controller manager is expected." + enum: + - "" + - External + - None + type: string + x-kubernetes-validations: + - message: state is immutable once set + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: state may not be added or removed once + set + rule: (has(self.state) == has(oldSelf.state)) || + (!has(oldSelf.state) && self.state != "External") + type: object + x-kubernetes-validations: + - message: cloudControllerManager may not be added or + removed once set + rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) + gcp: + description: GCP contains settings specific to the Google + Cloud Platform infrastructure provider. + properties: + cloudLoadBalancerConfig: + default: + dnsType: PlatformDefault + description: cloudLoadBalancerConfig is a union that + contains the IP addresses of API, API-Int and Ingress + Load Balancers created on the cloud platform. These + values would not be populated on on-prem platforms. + These Load Balancer IPs are used to configure the + in-cluster DNS instances for API, API-Int and Ingress + services. `dnsType` is expected to be set to `ClusterHosted` + when these Load Balancer IP addresses are populated + and used. + nullable: true + properties: + clusterHosted: + description: clusterHosted holds the IP addresses + of API, API-Int and Ingress Load Balancers on + Cloud Platforms. The DNS solution hosted within + the cluster uses these IP addresses to provide + resolution for API, API-Int and Ingress services.
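+ # Illustrative sketch (not part of the generated schema): an External
+ # platform whose nodes are initialized by an out-of-tree cloud controller
+ # manager, per the external stanza above. Once set, state is immutable:
+ #
+ #   platformStatus:
+ #     type: External
+ #     external:
+ #       cloudControllerManager:
+ #         state: External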
+ properties: + apiIntLoadBalancerIPs: + description: apiIntLoadBalancerIPs holds Load + Balancer IPs for the internal API service. + These Load Balancer IP addresses can be + IPv4 and/or IPv6 addresses. Entries in the + apiIntLoadBalancerIPs must be unique. A + maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, + "10.0.0.0" or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + apiLoadBalancerIPs: + description: apiLoadBalancerIPs holds Load + Balancer IPs for the API service. These + Load Balancer IP addresses can be IPv4 and/or + IPv6 addresses. Could be empty for private + clusters. Entries in the apiLoadBalancerIPs + must be unique. A maximum of 16 IP addresses + are permitted. + format: ip + items: + description: IP is an IP address (for example, + "10.0.0.0" or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + ingressLoadBalancerIPs: + description: ingressLoadBalancerIPs holds + IPs for Ingress Load Balancers. These Load + Balancer IP addresses can be IPv4 and/or + IPv6 addresses. Entries in the ingressLoadBalancerIPs + must be unique. A maximum of 16 IP addresses + are permitted. + format: ip + items: + description: IP is an IP address (for example, + "10.0.0.0" or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + type: object + dnsType: + default: PlatformDefault + description: dnsType indicates the type of DNS + solution in use within the cluster. Its default + value of `PlatformDefault` indicates that the + cluster's DNS is the default provided by the + cloud platform. It can be set to `ClusterHosted` + to bypass the configuration of the cloud default + DNS. In this mode, the cluster needs to provide + a self-hosted DNS solution for the cluster's + installation to succeed. The cluster's use of + the cloud's Load Balancers is unaffected by + this setting. The value is immutable after it + has been set at install time. Currently, there + is no way for the customer to add additional + DNS entries into the cluster hosted DNS. Enabling + this functionality allows the user to start + their own DNS solution outside the cluster after + installation is complete. The customer would + be responsible for configuring this custom DNS + solution, and it can be run in addition to the + in-cluster DNS solution. + enum: + - ClusterHosted + - PlatformDefault + type: string + x-kubernetes-validations: + - message: dnsType is immutable + rule: oldSelf == '' || self == oldSelf + type: object + x-kubernetes-validations: + - message: clusterHosted is permitted only when dnsType + is ClusterHosted + rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' + ? !has(self.clusterHosted) : true' + projectID: + description: projectID is the Project ID for + new GCP resources created for the cluster. + type: string + region: + description: region holds the region for new GCP resources + created for the cluster. + type: string + resourceLabels: + description: resourceLabels is a list of additional + labels to apply to GCP resources created for the + cluster.
See https://cloud.google.com/compute/docs/labeling-resources + for information on labeling GCP resources. GCP supports + a maximum of 64 labels per resource. OpenShift reserves + 32 labels for internal use, allowing 32 labels for + user configuration. + items: + description: GCPResourceLabel is a label to apply + to GCP resources created for the cluster. + properties: + key: + description: key is the key part of the label. + A label key can have a maximum of 63 characters + and cannot be empty. Label key must begin + with a lowercase letter, and must contain + only lowercase letters, numeric characters, + and the following special characters `_-`. + Label key must not have the reserved prefixes + `kubernetes-io` and `openshift-io`. + maxLength: 63 + minLength: 1 + pattern: ^[a-z][0-9a-z_-]{0,62}$ + type: string + x-kubernetes-validations: + - message: label keys must not start with either + `openshift-io` or `kubernetes-io` + rule: '!self.startsWith(''openshift-io'') + && !self.startsWith(''kubernetes-io'')' + value: + description: value is the value part of the + label. A label value can have a maximum of + 63 characters and cannot be empty. Value must + contain only lowercase letters, numeric characters, + and the following special characters `_-`. + maxLength: 63 + minLength: 1 + pattern: ^[0-9a-z_-]{1,63}$ + type: string + required: + - key + - value + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceLabels are immutable and may only + be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, + x in self) + resourceTags: + description: resourceTags is a list of additional + tags to apply to GCP resources created for the cluster. + See https://cloud.google.com/resource-manager/docs/tags/tags-overview + for information on tagging GCP resources. GCP supports + a maximum of 50 tags per resource. + items: + description: GCPResourceTag is a tag to apply to + GCP resources created for the cluster. + properties: + key: + description: key is the key part of the tag. + A tag key can have a maximum of 63 characters + and cannot be empty. Tag key must begin and + end with an alphanumeric character, and must + contain only uppercase, lowercase alphanumeric + characters, and the following special characters + `._-`. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$ + type: string + parentID: + description: 'parentID is the ID of the hierarchical + resource where the tags are defined, e.g. + at the Organization or the Project level. + To find the Organization or Project ID refer + to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, + https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. + An OrganizationID must consist of decimal + numbers, and cannot have leading zeroes. A + ProjectID must be 6 to 30 characters in length, + can only contain lowercase letters, numbers, + and hyphens, and must start with a letter, + and cannot end with a hyphen.' + maxLength: 32 + minLength: 1 + pattern: (^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$) + type: string + value: + description: value is the value part of the + tag. A tag value can have a maximum of 63 + characters and cannot be empty. 
Tag value + must begin and end with an alphanumeric character, + and must contain only uppercase, lowercase + alphanumeric characters, and the following + special characters `_-.@%=+:,*#&(){}[]` and + spaces. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$ + type: string + required: + - key + - parentID + - value + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceTags are immutable and may only + be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, + x in self) + type: object + x-kubernetes-validations: + - message: resourceLabels may only be configured during + installation + rule: '!has(oldSelf.resourceLabels) && !has(self.resourceLabels) + || has(oldSelf.resourceLabels) && has(self.resourceLabels)' + - message: resourceTags may only be configured during + installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) + || has(oldSelf.resourceTags) && has(self.resourceTags)' + ibmcloud: + description: IBMCloud contains settings specific to the + IBMCloud infrastructure provider. + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud + Internet Services instance managing the DNS zone + for the cluster's base domain + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS + Services instance managing the DNS zone for the + cluster's base domain + type: string + location: + description: Location is where the cluster has been + deployed + type: string + providerType: + description: ProviderType indicates the type of cluster + that was created + type: string + resourceGroupName: + description: ResourceGroupName is the Resource Group + for new IBMCloud resources created for the cluster. + type: string + serviceEndpoints: + description: serviceEndpoints is a list of custom + endpoints which will override the default service + endpoints of an IBM Cloud service. These endpoints + are consumed by components within the cluster to + reach the respective IBM Cloud Services. + items: + description: IBMCloudServiceEndpoint stores the + configuration of a custom url to override existing + defaults of IBM Cloud Services. + properties: + name: + description: 'name is the name of the IBM Cloud + service. Possible values are: CIS, COS, DNSServices, + GlobalSearch, GlobalTagging, HyperProtect, + IAM, KeyProtect, ResourceController, ResourceManager, + or VPC. For example, the IBM Cloud Private + IAM service could be configured with the service + `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` + Whereas the IBM Cloud Private VPC service + for US South (Dallas) could be configured + with the service `name` of `VPC` and `url` + of `https://us.south.private.iaas.cloud.ibm.com`' + enum: + - CIS + - COS + - DNSServices + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - ResourceController + - ResourceManager + - VPC + type: string + url: + description: url is fully qualified URI with + scheme https, that overrides the default generated + endpoint for a client. This must be provided + and cannot be empty. 
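+ # Illustrative sketch (hypothetical values; not part of the generated
+ # schema): GCP labels and tags as constrained by the gcp stanza above.
+ # parentID, keys and values must obey the patterns given, and the CEL rules
+ # make both lists install-time only:
+ #
+ #   platformStatus:
+ #     type: GCP
+ #     gcp:
+ #       projectID: my-project
+ #       region: us-central1
+ #       resourceLabels:
+ #         - key: env
+ #           value: prod
+ #       resourceTags:
+ #         - parentID: "123456789012"
+ #           key: cost-center
+ #           value: eng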
+ type: string + x-kubernetes-validations: + - message: url must be a valid absolute URL + rule: isURL(self) + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + kubevirt: + description: Kubevirt contains settings specific to the + kubevirt infrastructure provider. + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. + type: string + type: object + nutanix: + description: Nutanix contains settings specific to the + Nutanix infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. \n Deprecated: Use + APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. \n Deprecated: Use IngressIPs + instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IPs otherwise only + one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer + used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer + used by the cluster on Nutanix platform which + can be a user-managed or openshift-managed load + balancer that is to be used for the OpenShift + API and Ingress endpoints. When set to OpenShiftManagedDefault + the static pods in charge of API and Ingress + traffic load-balancing defined in the machine + config operator will be deployed. When set to + UserManaged these static pods will not be deployed + and it is expected that the load balancer is + configured out of band by the deployer. 
When + omitted, this means no opinion and the platform + is left to choose a reasonable default. The + default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + type: object + openstack: + description: OpenStack contains settings specific to the + OpenStack infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. \n Deprecated: Use + APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + cloudName: + description: cloudName is the name of the desired + OpenStack cloud in the client configuration file + (`clouds.yaml`). + type: string + ingressIP: + description: "ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. \n Deprecated: Use IngressIPs + instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IPs otherwise only + one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer + used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer + used by the cluster on OpenStack platform which + can be a user-managed or openshift-managed load + balancer that is to be used for the OpenShift + API and Ingress endpoints. When set to OpenShiftManagedDefault + the static pods in charge of API and Ingress + traffic load-balancing defined in the machine + config operator will be deployed. When set to + UserManaged these static pods will not be deployed + and it is expected that the load balancer is + configured out of band by the deployer. When + omitted, this means no opinion and the platform + is left to choose a reasonable default. The + default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. 
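+ # Illustrative sketch (not part of the generated schema): selecting a
+ # user-managed load balancer on an on-prem platform, as described by the
+ # loadBalancer stanzas above. The type is immutable once set:
+ #
+ #   platformStatus:
+ #     type: OpenStack
+ #     openstack:
+ #       loadBalancer:
+ #         type: UserManaged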
+ items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. Unlike the one managed by + the DNS operator, `NodeDNSIP` provides name resolution + for the nodes themselves. There is no DNS-as-a-service + for OpenStack deployments. In order to minimize + necessary changes to the datacenter DNS, a DNS service + is hosted as a static pod to serve those hostnames + to the nodes in the cluster. + type: string + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt + infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. \n Deprecated: Use + APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. \n Deprecated: Use IngressIPs + instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IPs otherwise only + one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer + used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer + used by the cluster on Ovirt platform which + can be a user-managed or openshift-managed load + balancer that is to be used for the OpenShift + API and Ingress endpoints. When set to OpenShiftManagedDefault + the static pods in charge of API and Ingress + traffic load-balancing defined in the machine + config operator will be deployed. When set to + UserManaged these static pods will not be deployed + and it is expected that the load balancer is + configured out of band by the deployer. When + omitted, this means no opinion and the platform + is left to choose a reasonable default. The + default value is OpenShiftManagedDefault. 
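+ # Illustrative sketch (hypothetical addresses; not part of the generated
+ # schema): the deprecated singular apiServerInternalIP/ingressIP fields
+ # typically repeat the first entry of their plural replacements, so a
+ # dual-stack oVirt status would plausibly carry:
+ #
+ #   platformStatus:
+ #     type: oVirt
+ #     ovirt:
+ #       apiServerInternalIP: 192.0.2.10          # deprecated mirror
+ #       apiServerInternalIPs: [192.0.2.10, 2001:db8::10]
+ #       ingressIP: 192.0.2.11
+ #       ingressIPs: [192.0.2.11, 2001:db8::11]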
+ enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + nodeDNSIP: + description: 'deprecated: as of 4.6, this field is + no longer set or honored. It will be removed in + a future release.' + type: string + type: object + powervs: + description: PowerVS contains settings specific to the + Power Systems Virtual Servers infrastructure provider. + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud + Internet Services instance managing the DNS zone + for the cluster's base domain + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS + Services instance managing the DNS zone for the + cluster's base domain + type: string + region: + description: region holds the default Power VS region + for new Power VS resources created by the cluster. + type: string + resourceGroup: + description: 'resourceGroup is the resource group + name for new IBMCloud resources created for a cluster. + The resource group specified here will be used by + cluster-image-registry-operator to set up a COS + Instance in IBMCloud for the cluster registry. More + about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. + When omitted, the image registry operator won''t + be able to configure storage, which results in the + image registry cluster operator not being in an + available state.' + maxLength: 40 + pattern: ^[a-zA-Z0-9-_ ]+$ + type: string + x-kubernetes-validations: + - message: resourceGroup is immutable once set + rule: oldSelf == '' || self == oldSelf + serviceEndpoints: + description: serviceEndpoints is a list of custom + endpoints which will override the default service + endpoints of a Power VS service. + items: + description: PowervsServiceEndpoint stores the configuration + of a custom url to override existing defaults + of PowerVS Services. + properties: + name: + description: name is the name of the Power VS + service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api + ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller + Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with + scheme https, that overrides the default generated + endpoint for a client. This must be provided + and cannot be empty. + format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + zone: + description: 'zone holds the default zone for the + new Power VS resources created by the cluster. Note: + Currently only single-zone OCP clusters are supported' + type: string + type: object + x-kubernetes-validations: + - message: cannot unset resourceGroup once set + rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' + type: + description: "type is the underlying infrastructure provider + for the cluster. This value controls whether infrastructure + automation such as service load balancers, dynamic volume + provisioning, machine creation and deletion, and other + integrations are enabled. If None, no infrastructure + automation is enabled. 
Allowed values are \"AWS\",
+ \"Azure\",
+ \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\",
+ \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\",
+ \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual
+ components may not support all platforms, and must handle
+ unrecognized platforms as None if they do not support
+ that platform. \n This value will be synced to
+ the `status.platform` and `status.platformStatus.type`.
+ Currently this value cannot be changed once set."
+ enum:
+ - ""
+ - AWS
+ - Azure
+ - BareMetal
+ - GCP
+ - Libvirt
+ - OpenStack
+ - None
+ - VSphere
+ - oVirt
+ - IBMCloud
+ - KubeVirt
+ - EquinixMetal
+ - PowerVS
+ - AlibabaCloud
+ - Nutanix
+ - External
+ type: string
+ vsphere:
+ description: VSphere contains settings specific to the
+ VSphere infrastructure provider.
+ properties:
+ apiServerInternalIP:
+ description: "apiServerInternalIP is an IP address
+ to contact the Kubernetes API server that can be
+ used by components inside the cluster, like kubelets
+ using the infrastructure rather than Kubernetes
+ networking. It is the IP that the Infrastructure.status.apiServerInternalURI
+ points to. It is the IP for a self-hosted load balancer
+ in front of the API servers. \n Deprecated: Use
+ APIServerInternalIPs instead."
+ type: string
+ apiServerInternalIPs:
+ description: apiServerInternalIPs are the IP addresses
+ to contact the Kubernetes API server that can be
+ used by components inside the cluster, like kubelets
+ using the infrastructure rather than Kubernetes
+ networking. These are the IPs for a self-hosted
+ load balancer in front of the API servers. In dual
+ stack clusters this list contains two IPs otherwise
+ only one.
+ format: ip
+ items:
+ type: string
+ maxItems: 2
+ type: array
+ x-kubernetes-list-type: set
+ ingressIP:
+ description: "ingressIP is an external IP which routes
+ to the default ingress controller. The IP is a suitable
+ target of a wildcard DNS record used to resolve
+ default route host names. \n Deprecated: Use IngressIPs
+ instead."
+ type: string
+ ingressIPs:
+ description: ingressIPs are the external IPs which
+ route to the default ingress controller. The IPs
+ are suitable targets of a wildcard DNS record used
+ to resolve default route host names. In dual stack
+ clusters this list contains two IPs otherwise only
+ one.
+ format: ip
+ items:
+ type: string
+ maxItems: 2
+ type: array
+ x-kubernetes-list-type: set
+ loadBalancer:
+ default:
+ type: OpenShiftManagedDefault
+ description: loadBalancer defines how the load balancer
+ used by the cluster is configured.
+ properties:
+ type:
+ default: OpenShiftManagedDefault
+ description: type defines the type of load balancer
+ used by the cluster on VSphere platform which
+ can be a user-managed or openshift-managed load
+ balancer that is to be used for the OpenShift
+ API and Ingress endpoints. When set to OpenShiftManagedDefault
+ the static pods in charge of API and Ingress
+ traffic load-balancing defined in the machine
+ config operator will be deployed. When set to
+ UserManaged these static pods will not be deployed
+ and it is expected that the load balancer is
+ configured out of band by the deployer. When
+ omitted, this means no opinion and the platform
+ is left to choose a reasonable default. The
+ default value is OpenShiftManagedDefault.
+ enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. + items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. Unlike the one managed by + the DNS operator, `NodeDNSIP` provides name resolution + for the nodes themselves. There is no DNS-as-a-service + for vSphere deployments. In order to minimize necessary + changes to the datacenter DNS, a DNS service is + hosted as a static pod to serve those hostnames + to the nodes in the cluster. + type: string + type: object + type: object + type: object + required: + - spec + type: object + x-kubernetes-embedded-resource: true + internalRegistryPullSecret: + description: internalRegistryPullSecret is the pull secret for the + internal registry, used by rpm-ostree to pull images from the internal + registry if present + format: byte + nullable: true + type: string + ipFamilies: + description: ipFamilies indicates the IP families in use by the cluster + network + type: string + kubeAPIServerServingCAData: + description: kubeAPIServerServingCAData managed Kubelet to API Server + Cert... Rotated automatically + format: byte + type: string + network: + description: Network contains additional network related information + nullable: true + properties: + mtuMigration: + description: MTUMigration contains the MTU migration configuration. + nullable: true + properties: + machine: + description: Machine contains MTU migration configuration + for the machine's uplink. + properties: + from: + description: From is the MTU to migrate from. + format: int32 + minimum: 0 + type: integer + to: + description: To is the MTU to migrate to. + format: int32 + minimum: 0 + type: integer + type: object + network: + description: Network contains MTU migration configuration + for the default network. + properties: + from: + description: From is the MTU to migrate from. + format: int32 + minimum: 0 + type: integer + to: + description: To is the MTU to migrate to. + format: int32 + minimum: 0 + type: integer + type: object + type: object + required: + - mtuMigration + type: object + networkType: + description: 'networkType holds the type of network the cluster is + using XXX: this is temporary and will be dropped as soon as possible + in favor of a better support to start network related services the + proper way. Nobody is also changing this once the cluster is up + and running the first time, so, disallow regeneration if this changes.' + type: string + osImageURL: + description: OSImageURL is the old-format container image that contains + the OS update payload. + type: string + platform: + description: platform is deprecated, use Infra.Status.PlatformStatus.Type + instead + type: string + proxy: + description: proxy holds the current proxy configuration for the nodes + nullable: true + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. 
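+ # Illustrative value (editorial comment, not part of the generated schema);
+ # the proxy host is hypothetical:
+ #   httpProxy: http://proxy.example.com:3128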
+ type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or + CIDRs for which the proxy should not be used. + type: string + type: object + pullSecret: + description: pullSecret is the default pull secret that needs to be + installed on all machines. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + releaseImage: + description: releaseImage is the image used when installing the cluster + type: string + rootCAData: + description: rootCAData specifies the root CA data + format: byte + type: string + required: + - additionalTrustBundle + - baseOSContainerImage + - cloudProviderCAData + - cloudProviderConfig + - clusterDNSIP + - dns + - images + - infra + - ipFamilies + - kubeAPIServerServingCAData + - network + - proxy + - releaseImage + - rootCAData + type: object + status: + description: ControllerConfigStatus is the status for ControllerConfig + properties: + conditions: + description: conditions represents the latest available observations + of current state. + items: + description: ControllerConfigStatusCondition contains condition + information for ControllerConfigStatus + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update + to the current status object. + format: date-time + nullable: true + type: string + message: + description: message provides additional information about the + current condition. This is only to be consumed by humans. + type: string + reason: + description: reason is the reason for the condition's last transition. Reasons + are PascalCase + type: string + status: + description: status of the condition, one of True, False, Unknown. 
+ type: string + type: + description: type specifies the state of the operator's reconciliation + functionality. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + x-kubernetes-list-type: atomic + controllerCertificates: + description: controllerCertificates represents the latest available + observations of the automatically rotating certificates in the MCO. + items: + description: ControllerCertificate contains info about a specific + cert. + properties: + bundleFile: + description: bundleFile is the larger bundle a cert comes from + type: string + notAfter: + description: notAfter is the upper boundary for validity + format: date-time + type: string + notBefore: + description: notBefore is the lower boundary for validity + format: date-time + type: string + signer: + description: signer is the cert Issuer + type: string + subject: + description: subject is the cert subject + type: string + required: + - bundleFile + - signer + - subject + type: object + type: array + x-kubernetes-list-type: atomic + observedGeneration: + description: observedGeneration represents the generation observed + by the controller. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/install/0000_80_machine-config_01_machineconfignodes-DevPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineconfignodes-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..eed92c7361 --- /dev/null +++ b/install/0000_80_machine-config_01_machineconfignodes-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,366 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1596 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineconfignodes.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineConfigNode + listKind: MachineConfigNodeList + plural: machineconfignodes + singular: machineconfignode + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Updated")].status + name: Updated + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePrepared")].status + name: UpdatePrepared + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateExecuted")].status + name: UpdateExecuted + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePostActionComplete")].status + name: UpdatePostActionComplete + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateComplete")].status + name: UpdateComplete + type: string + - jsonPath: .status.conditions[?(@.type=="Resumed")].status + name: Resumed + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateCompatible")].status + name: UpdateCompatible + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="AppliedFilesAndOS")].status + name: UpdatedFilesAndOS + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Cordoned")].status + name: CordonedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Drained")].status + name: DrainedNode + priority: 1 + type: string + - jsonPath: 
.status.conditions[?(@.type=="RebootedNode")].status + name: RebootedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ReloadedCRIO")].status + name: ReloadedCRIO + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Uncordoned")].status + name: UncordonedNode + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: 'MachineConfigNode describes the health of the Machines on the + system Compatibility level 4: No compatibility is provided, the API can + change at any point for any reason. These capabilities should not be used + by applications needing long term support.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machine config node. + properties: + configVersion: + description: configVersion holds the desired config version for the + node targeted by this machine config node resource. The desired + version represents the machine config the node will attempt to update + to. This gets set before the machine config operator validates the + new machine config against the current machine config. + properties: + desired: + description: desired is the name of the machine config that the + the node should be upgraded to. This value is set when the machine + config pool generates a new version of its rendered configuration. + When this value is changed, the machine config daemon starts + the node upgrade process. This value gets set in the machine + config node spec once the machine config has been targeted for + upgrade and before it is validated. Must be a lowercase RFC-1123 + hostname (https://tools.ietf.org/html/rfc1123) It may consist + of only alphanumeric characters, hyphens (-) and periods (.) + and must be at most 253 characters in length. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - desired + type: object + node: + description: node contains a reference to the node for this machine + config node. + properties: + name: + description: name is the object name. Must be a lowercase RFC-1123 + hostname (https://tools.ietf.org/html/rfc1123) It may consist + of only alphanumeric characters, hyphens (-) and periods (.) + and must be at most 253 characters in length. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + pinnedImageSets: + description: pinnedImageSets holds the desired pinned image sets that + this node should pin and pull. + items: + properties: + name: + description: name is the name of the pinned image set. 
Must + be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + It may consist of only alphanumeric characters, hyphens (-) + and periods (.) and must be at most 253 characters in length. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + pool: + description: pool contains a reference to the machine config pool + that this machine config node's referenced node belongs to. + properties: + name: + description: name is the object name. Must be a lowercase RFC-1123 + hostname (https://tools.ietf.org/html/rfc1123) It may consist + of only alphanumeric characters, hyphens (-) and periods (.) + and must be at most 253 characters in length. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + required: + - configVersion + - node + - pool + type: object + status: + description: status describes the last observed state of this machine + config node. + properties: + conditions: + description: conditions represent the observations of a machine config + node's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. 
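+ # Illustrative condition (editorial comment, not part of the generated
+ # schema): a node that finished its update would typically report, e.g.
+ #   {type: Updated, status: "True", reason: Updated}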
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + configVersion: + description: configVersion describes the current and desired machine + config for this node. The current version represents the current + machine config for the node and is updated after a successful update. + The desired version represents the machine config the node will + attempt to update to. This desired machine config has been compared + to the current machine config and has been validated by the machine + config operator as one that is valid and that exists. + properties: + current: + description: current is the name of the machine config currently + in use on the node. This value is updated once the machine config + daemon has completed the update of the configuration for the + node. This value should match the desired version unless an + upgrade is in progress. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) It may consist of only + alphanumeric characters, hyphens (-) and periods (.) and must + be at most 253 characters in length. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + desired: + description: desired is the MachineConfig the node wants to upgrade + to. This value gets set in the machine config node status once + the machine config has been validated against the current machine + config. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + It may consist of only alphanumeric characters, hyphens (-) + and periods (.) and must be at most 253 characters in length. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - desired + type: object + observedGeneration: + description: observedGeneration represents the generation observed + by the controller. This field is updated when the controller observes + a change to the desiredConfig in the configVersion of the machine + config node spec. + format: int64 + type: integer + pinnedImageSets: + description: pinnedImageSets describes the current and desired pinned + image sets for this node. The current version is the generation + of the pinned image set that has most recently been successfully + pulled and pinned on this node. The desired version is the generation + of the pinned image set that is targeted to be pulled and pinned + on this node. + items: + properties: + currentGeneration: + description: currentGeneration is the generation of the pinned + image set that has most recently been successfully pulled + and pinned on this node. 
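+ # Illustrative progression (editorial comment, not part of the generated
+ # schema): while a newer set is still being pulled, a node might report
+ #   currentGeneration: 2
+ #   desiredGeneration: 3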
+ format: int32
+ type: integer
+ desiredGeneration:
+ description: desiredGeneration is the generation of
+ the pinned image set that is targeted to be pulled and pinned
+ on this node.
+ format: int32
+ minimum: 0
+ type: integer
+ lastFailedGeneration:
+ description: lastFailedGeneration is the generation of the most
+ recent pinned image set that failed to be pulled and pinned
+ on this node.
+ format: int32
+ minimum: 0
+ type: integer
+ lastFailedGenerationErrors:
+ description: lastFailedGenerationErrors is a list of errors
+ explaining why the lastFailedGeneration failed to be pulled and pinned.
+ items:
+ type: string
+ maxItems: 10
+ type: array
+ name:
+ description: name is the name of the pinned image set. Must
+ be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
+ It may consist of only alphanumeric characters, hyphens (-)
+ and periods (.) and must be at most 253 characters in length.
+ maxLength: 253
+ pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$
+ type: string
+ required:
+ - name
+ type: object
+ x-kubernetes-validations:
+ - message: desired generation must be greater than or equal to the
+ current generation
+ rule: 'has(self.desiredGeneration) && has(self.currentGeneration)
+ ? self.desiredGeneration >= self.currentGeneration : true'
+ - message: desired generation must be greater than or equal to last failed
+ generation
+ rule: 'has(self.lastFailedGeneration) && has(self.desiredGeneration)
+ ? self.desiredGeneration >= self.lastFailedGeneration : true'
+ - message: desired generation must be defined if last failed generation
+ is defined
+ rule: 'has(self.lastFailedGeneration) ? has(self.desiredGeneration):
+ true'
+ maxItems: 100
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ required:
+ - configVersion
+ type: object
+ required:
+ - spec
+ type: object
+ x-kubernetes-validations:
+ - message: spec.node.name should match metadata.name
+ rule: self.metadata.name == self.spec.node.name
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/install/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml
new file mode 100644
index 0000000000..c780364573
--- /dev/null
+++ b/install/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml
@@ -0,0 +1,629 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ api-approved.openshift.io: https://github.com/openshift/api/pull/1453
+ api.openshift.io/merged-by-featuregates: "true"
+ include.release.openshift.io/ibm-cloud-managed: "true"
+ include.release.openshift.io/self-managed-high-availability: "true"
+ release.openshift.io/feature-set: DevPreviewNoUpgrade
+ labels:
+ openshift.io/operator-managed: ""
+ name: machineconfigpools.machineconfiguration.openshift.io
+spec:
+ group: machineconfiguration.openshift.io
+ names:
+ kind: MachineConfigPool
+ listKind: MachineConfigPoolList
+ plural: machineconfigpools
+ shortNames:
+ - mcp
+ singular: machineconfigpool
+ scope: Cluster
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.configuration.name
+ name: Config
+ type: string
+ - description: When all the machines in the pool are updated to the correct machine
+ config.
+ jsonPath: .status.conditions[?(@.type=="Updated")].status
+ name: Updated
+ type: string
+ - description: When at least one machine is either not updated or is in
+ the process of updating to the desired machine config.
+ jsonPath: .status.conditions[?(@.type=="Updating")].status
+ name: Updating
+ type: string
+ - description: When progress is blocked on updating one or more nodes or the pool
+ configuration is failing.
+ jsonPath: .status.conditions[?(@.type=="Degraded")].status
+ name: Degraded
+ type: string
+ - description: Total number of machines in the machine config pool
+ jsonPath: .status.machineCount
+ name: MachineCount
+ type: number
+ - description: Total number of ready machines targeted by the pool
+ jsonPath: .status.readyMachineCount
+ name: ReadyMachineCount
+ type: number
+ - description: Total number of machines targeted by the pool that have the CurrentMachineConfig
+ as their config
+ jsonPath: .status.updatedMachineCount
+ name: UpdatedMachineCount
+ type: number
+ - description: Total number of machines marked degraded (or unreconcilable)
+ jsonPath: .status.degradedMachineCount
+ name: DegradedMachineCount
+ type: number
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: "MachineConfigPool describes a pool of MachineConfigs. \n Compatibility
+ level 1: Stable within a major release for a minimum of 12 months or 3 minor
+ releases (whichever is longer)."
+ properties:
+ apiVersion:
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ type: string
+ kind:
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: MachineConfigPoolSpec is the spec for MachineConfigPool resource.
+ properties:
+ configuration:
+ description: The targeted MachineConfig object for the machine config
+ pool.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: 'If referring to a piece of an object instead of
+ an entire object, this string should contain a valid JSON/Go
+ field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within
+ a pod, this would take on a value like: "spec.containers{name}"
+ (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]"
+ (container with index 2 in this pod). This syntax is chosen
+ only to have some well-defined way of referencing a part of
+ an object. TODO: this design is not final and this field is
+ subject to change in the future.'
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + source: + description: source is the list of MachineConfig objects that + were used to generate the single MachineConfig object specified + in `content`. + items: + description: "ObjectReference contains enough information to + let you inspect or modify the referred object. --- New uses + of this type are discouraged because of difficulty describing + its usage when embedded in APIs. 1. Ignored fields. It includes + many fields which are not generally honored. For instance, + ResourceVersion and FieldPath are both very rarely valid in + actual usage. 2. Invalid usage help. It is impossible to + add specific help for individual usage. In most embedded + usages, there are particular restrictions like, \"must refer + only to types A and B\" or \"UID not honored\" or \"name must + be restricted\". Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, + the validation rules are different by usage, which makes it + hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency + is on the group,resource tuple and the version of the actual + struct is irrelevant. 5. We cannot easily change it. Because + this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an + underspecified API type they do not control. \n Instead of + using this type, create a locally provided and used type that + is well-focused on your reference. For example, ServiceReferences + for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + machineConfigSelector: + description: machineConfigSelector specifies a label selector for + MachineConfigs. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + on how label and selectors work. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + maxUnavailable: + anyOf: + - type: integer + - type: string + description: "maxUnavailable defines either an integer number or percentage + of nodes in the pool that can go Unavailable during an update. This + includes nodes Unavailable for any reason, including user initiated + cordons, failing nodes, etc. The default value is 1. \n A value + larger than 1 will mean multiple nodes going unavailable during + the update, which may affect your workload stress on the remaining + nodes. You cannot set this value to 0 to stop updates (it will default + back to 1); to stop updates, use the 'paused' property instead. + Drain will respect Pod Disruption Budgets (PDBs) such as etcd quorum + guards, even if maxUnavailable is greater than one." + x-kubernetes-int-or-string: true + nodeSelector: + description: nodeSelector specifies a label selector for Machines + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. 
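+ # Illustrative requirement (editorial comment, not part of the generated
+ # schema): a pool selecting worker nodes might use, e.g.
+ #   - key: node-role.kubernetes.io/worker
+ #     operator: Exists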
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to
+ a set of values. Valid operators are In, NotIn, Exists
+ and DoesNotExist.
+ type: string
+ values:
+ description: values is an array of string values. If the
+ operator is In or NotIn, the values array must be non-empty.
+ If the operator is Exists or DoesNotExist, the values
+ array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: matchLabels is a map of {key,value} pairs. A single
+ {key,value} in the matchLabels map is equivalent to an element
+ of matchExpressions, whose key field is "key", the operator
+ is "In", and the values array contains only "value". The requirements
+ are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ paused:
+ description: paused specifies whether or not changes to this machine
+ config pool should be stopped. This includes generating a new desiredMachineConfig
+ and updating machines.
+ type: boolean
+ pinnedImageSets:
+ description: "pinnedImageSets specifies a sequence of PinnedImageSetRef
+ objects for the pool. Nodes within this pool will preload and pin
+ images defined in the PinnedImageSet. Before pulling images the
+ MachineConfigDaemon will ensure the total uncompressed size of all
+ the images does not exceed available resources. If the total size
+ of the images exceeds the available resources the controller will
+ report a Degraded status to the MachineConfigPool and not attempt
+ to pull any images. Also to help ensure the kubelet can mitigate
+ storage risk, the pinned_image configuration and subsequent service
+ reload will happen only after all of the images have been pulled
+ for each set. Images from multiple PinnedImageSets are loaded and
+ pinned sequentially as listed. Duplicate and existing images will
+ be skipped. \n Any failure to prefetch or pin images will result
+ in a Degraded pool. Resolving these failures is the responsibility
+ of the user. The admin should be proactive in ensuring adequate
+ storage and proper image authentication exists in advance."
+ items:
+ properties:
+ name:
+ description: name is a reference to the name of a PinnedImageSet. Must
+ adhere to RFC-1123 (https://tools.ietf.org/html/rfc1123).
+ Made up of one or more period-separated (.) segments, where
+ each segment consists of alphanumeric characters and hyphens
+ (-), must begin and end with an alphanumeric character, and
+ is at most 63 characters in length. The total length of the
+ name must not exceed 253 characters.
+ maxLength: 253
+ minLength: 1
+ pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$
+ type: string
+ required:
+ - name
+ type: object
+ maxItems: 100
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ type: object
+ status:
+ description: MachineConfigPoolStatus is the status for MachineConfigPool
+ resource.
+ properties:
+ certExpirys:
+ description: certExpirys keeps track of important certificate expiration
+ data
+ items:
+ description: certExpiry contains the bundle name and the expiry
+ date
+ properties:
+ bundle:
+ description: bundle is the name of the bundle in which the subject
+ certificate resides
+ type: string
+ expiry:
+ description: expiry is the date after which the certificate
+ will no longer be valid
+ format: date-time
+ type: string
+ subject:
+ description: subject is the subject of the certificate
+ type: string
+ required:
+ - bundle
+ - subject
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ conditions:
+ description: conditions represents the latest available observations
+ of current state.
+ items:
+ description: MachineConfigPoolCondition contains condition information
+ for a MachineConfigPool.
+ properties:
+ lastTransitionTime:
+ description: lastTransitionTime is the timestamp corresponding
+ to the last status change of this condition.
+ format: date-time
+ nullable: true
+ type: string
+ message:
+ description: message is a human readable description of the
+ details of the last transition, complementing reason.
+ type: string
+ reason:
+ description: reason is a brief machine readable explanation
+ for the condition's last transition.
+ type: string
+ status:
+ description: status of the condition, one of ('True', 'False',
+ 'Unknown').
+ type: string
+ type:
+ description: type of the condition, currently ('Done', 'Updating',
+ 'Failed').
+ type: string
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ configuration:
+ description: configuration represents the current MachineConfig object
+ for the machine config pool.
+ properties:
+ apiVersion:
+ description: API version of the referent.
+ type: string
+ fieldPath:
+ description: 'If referring to a piece of an object instead of
+ an entire object, this string should contain a valid JSON/Go
+ field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within
+ a pod, this would take on a value like: "spec.containers{name}"
+ (where "name" refers to the name of the container that triggered
+ the event) or if no container name is specified "spec.containers[2]"
+ (container with index 2 in this pod). This syntax is chosen
+ only to have some well-defined way of referencing a part of
+ an object. TODO: this design is not final and this field is
+ subject to change in the future.'
+ type: string
+ kind:
+ description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ type: string
+ name:
+ description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
+ type: string
+ namespace:
+ description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
+ type: string
+ resourceVersion:
+ description: 'Specific resourceVersion to which this reference
+ is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
+ type: string
+ source:
+ description: source is the list of MachineConfig objects that
+ were used to generate the single MachineConfig object specified
+ in `content`.
+ items:
+ description: "ObjectReference contains enough information to
+ let you inspect or modify the referred object.
--- New uses + of this type are discouraged because of difficulty describing + its usage when embedded in APIs. 1. Ignored fields. It includes + many fields which are not generally honored. For instance, + ResourceVersion and FieldPath are both very rarely valid in + actual usage. 2. Invalid usage help. It is impossible to + add specific help for individual usage. In most embedded + usages, there are particular restrictions like, \"must refer + only to types A and B\" or \"UID not honored\" or \"name must + be restricted\". Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, + the validation rules are different by usage, which makes it + hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency + is on the group,resource tuple and the version of the actual + struct is irrelevant. 5. We cannot easily change it. Because + this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an + underspecified API type they do not control. \n Instead of + using this type, create a locally provided and used type that + is well-focused on your reference. For example, ServiceReferences + for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ degradedMachineCount:
+ description: degradedMachineCount represents the total number of machines
+ marked degraded (or unreconcilable). A node is marked degraded if
+ applying a configuration failed.
+ format: int32
+ type: integer
+ machineCount:
+ description: machineCount represents the total number of machines
+ in the machine config pool.
+ format: int32
+ type: integer
+ observedGeneration:
+ description: observedGeneration represents the generation observed
+ by the controller.
+ format: int64
+ type: integer
+ poolSynchronizersStatus:
+ description: poolSynchronizersStatus is the status of the machines
+ managed by the pool synchronizers.
+ items:
+ properties:
+ availableMachineCount:
+ description: availableMachineCount is the number of machines
+ managed by the node synchronizer which are available.
+ format: int64
+ minimum: 0
+ type: integer
+ machineCount:
+ description: machineCount is the number of machines that are
+ managed by the node synchronizer.
+ format: int64
+ minimum: 0
+ type: integer
+ observedGeneration:
+ description: observedGeneration is the last generation change
+ that has been applied.
+ format: int64
+ minimum: 0
+ type: integer
+ x-kubernetes-validations:
+ - message: observedGeneration must not move backwards except
+ to zero
+ rule: self >= oldSelf || (self == 0 && oldSelf > 0)
+ poolSynchronizerType:
+ description: poolSynchronizerType describes the type of the
+ pool synchronizer.
+ enum:
+ - PinnedImageSets
+ maxLength: 256
+ type: string
+ readyMachineCount:
+ description: readyMachineCount is the number of machines managed
+ by the node synchronizer that are in a ready state.
+ format: int64
+ minimum: 0
+ type: integer
+ unavailableMachineCount:
+ description: unavailableMachineCount is the number of machines
+ managed by the node synchronizer that are unavailable.
+ format: int64
+ minimum: 0
+ type: integer
+ updatedMachineCount:
+ description: updatedMachineCount is the number of machines that
+ have been updated by the node synchronizer.
+ format: int64
+ minimum: 0
+ type: integer
+ required:
+ - availableMachineCount
+ - machineCount
+ - poolSynchronizerType
+ - readyMachineCount
+ - unavailableMachineCount
+ - updatedMachineCount
+ type: object
+ x-kubernetes-validations:
+ - message: machineCount must be greater than or equal to updatedMachineCount
+ rule: self.machineCount >= self.updatedMachineCount
+ - message: machineCount must be greater than or equal to availableMachineCount
+ rule: self.machineCount >= self.availableMachineCount
+ - message: machineCount must be greater than or equal to unavailableMachineCount
+ rule: self.machineCount >= self.unavailableMachineCount
+ - message: machineCount must be greater than or equal to readyMachineCount
+ rule: self.machineCount >= self.readyMachineCount
+ - message: availableMachineCount must be greater than or equal to
+ readyMachineCount
+ rule: self.availableMachineCount >= self.readyMachineCount
+ type: array
+ x-kubernetes-list-map-keys:
+ - poolSynchronizerType
+ x-kubernetes-list-type: map
+ readyMachineCount:
+ description: readyMachineCount represents the total number of ready
+ machines targeted by the pool.
+ format: int32
+ type: integer
+ unavailableMachineCount:
+ description: unavailableMachineCount represents the total number of
+ unavailable (non-ready) machines targeted by the pool.
A node is + marked unavailable if it is in updating state or NodeReady condition + is false. + format: int32 + type: integer + updatedMachineCount: + description: updatedMachineCount represents the total number of machines + targeted by the pool that have the CurrentMachineConfig as their + config. + format: int32 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/install/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..f274fba11a --- /dev/null +++ b/install/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,300 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1773 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineosbuilds.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineOSBuild + listKind: MachineOSBuildList + plural: machineosbuilds + singular: machineosbuild + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Prepared")].status + name: Prepared + type: string + - jsonPath: .status.conditions[?(@.type=="Building")].status + name: Building + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Interrupted")].status + name: Interrupted + type: string + - jsonPath: .status.conditions[?(@.type=="Restarted")].status + name: Restarted + type: string + - jsonPath: .status.conditions[?(@.type=="Failed")].status + name: Failed + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: 'MachineOSBuild describes a build process managed and deployed + by the MCO Compatibility level 4: No compatibility is provided, the API + can change at any point for any reason. These capabilities should not be + used by applications needing long term support.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machine os build + properties: + configGeneration: + description: configGeneration tracks which version of MachineOSConfig + this build is based off of + format: int64 + minimum: 1 + type: integer + desiredConfig: + description: desiredConfig is the desired config we want to build + an image for. 
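+ # Illustrative value (editorial comment, not part of the generated schema);
+ # the rendered-config name is hypothetical:
+ #   desiredConfig:
+ #     name: rendered-worker-0123456789abcdef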
+ properties: + name: + description: name is the name of the rendered MachineConfig object. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + machineOSConfig: + description: machineOSConfig is the config object which the build + is based on + properties: + name: + description: name of the MachineOSConfig + type: string + required: + - name + type: object + renderedImagePushspec: + description: 'renderedImagePushspec is set from the MachineOSConfig. + The format of the image pullspec is: host[:port][/namespace]/name:<tag> + or svc_name.namespace.svc[:port]/repository/name:<tag>' + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid :<tag>, where + '<digest>' is 64 characters long and '<tag>' is any valid string. Or + it must be a valid .svc followed by a port, repository, image + name, and tag. + rule: ((self.split(':').size() == 2 && self.split(':')[1].matches('^([a-zA-Z0-9-./:])+$')) + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. Or it must + be a valid .svc followed by a port, repository, image name, and + tag. + rule: ((self.split(':').size() == 2 && self.split(':')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$')) + version: + description: version tracks the newest MachineOSBuild for each MachineOSConfig + format: int64 + minimum: 1 + type: integer + required: + - configGeneration + - desiredConfig + - machineOSConfig + - renderedImagePushspec + - version + type: object + x-kubernetes-validations: + - message: machineOSBuildSpec is immutable once set + rule: self == oldSelf + status: + description: status describes the last observed state of this machine os + build + properties: + buildEnd: + description: buildEnd describes when the build ended. + format: date-time + type: string + x-kubernetes-validations: + - message: buildEnd is immutable once set + rule: self == oldSelf + buildStart: + description: buildStart describes when the build started. + format: date-time + type: string + x-kubernetes-validations: + - message: buildStart is immutable once set + rule: self == oldSelf + builderReference: + description: builderReference describes the image builder set in the + MachineOSConfig + properties: + buildPod: + description: buildPod is a reference to the pod that performs + the build. + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + required: + - group + - name + - resource + type: object + imageBuilderType: + description: ImageBuilderType describes the image builder set + in the MachineOSConfig + type: string + required: + - imageBuilderType + type: object + x-kubernetes-validations: + - message: buildPod is required when imageBuilderType is PodImageBuilder, + and forbidden otherwise + rule: 'has(self.imageBuilderType) && self.imageBuilderType == ''PodImageBuilder'' + ? 
true : !has(self.buildPod)' + conditions: + description: 'conditions are state related conditions for the build. + Valid types are: Prepared, Building, Failed, Interrupted, and Succeeded. + Once a Build is marked as Failed, no future conditions can be set. + This is enforced by the MCO.' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + finalImagePullspec: + description: finalImagePullspec describes the fully qualified pullspec + produced by this build from which the final image can be pulled. Must be in sha256 + digest format.
+ type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:<digest>' + suffix, where '<digest>' is 64 characters long + rule: ((self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$'))) + relatedObjects: + description: relatedObjects is a list of objects that are related + to the build process. + items: + description: ObjectReference contains enough information to let + you inspect or modify the referred object. + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + required: + - group + - name + - resource + type: object + type: array + required: + - buildStart + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/install/0000_80_machine-config_01_machineosconfigs-DevPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_machineosconfigs-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..cfd959f7f3 --- /dev/null +++ b/install/0000_80_machine-config_01_machineosconfigs-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,352 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1773 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineosconfigs.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineOSConfig + listKind: MachineOSConfigList + plural: machineosconfigs + singular: machineosconfig + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: 'MachineOSConfig describes the configuration for a build process + managed by the MCO. Compatibility level 4: No compatibility is provided, + the API can change at any point for any reason. These capabilities should + not be used by applications needing long term support.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machineosconfig + properties: + buildInputs: + description: buildInputs is where user input options for the build + live + properties: + baseImagePullSecret: + description: baseImagePullSecret is the secret used to pull the + base image. Must live in the openshift-machine-config-operator + namespace + properties: + name: + description: name is the name of the secret used to push or + pull this MachineOSConfig object.
this secret must be in + the openshift-machine-config-operator namespace. + type: string + required: + - name + type: object + baseOSExtensionsImagePullspec: + description: 'baseOSExtensionsImagePullspec is the base Extensions + image used in the build process. The MachineOSConfig object will + use the in-cluster image registry configuration. If you wish + to use a mirror or any other settings specific to registries.conf, + please specify those in the cluster-wide registries.conf. The + format of the image pullspec is: host[:port][/namespace]/name@sha256:<digest>' + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:<digest>' + suffix, where '<digest>' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + baseOSImagePullspec: + description: 'baseOSImagePullspec is the base OSImage we use to + build our custom image. The MachineOSConfig object will use + the in-cluster image registry configuration. If you wish to + use a mirror or any other settings specific to registries.conf, + please specify those in the cluster-wide registries.conf. The + format of the image pullspec is: host[:port][/namespace]/name@sha256:<digest>' + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:<digest>' + suffix, where '<digest>' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + containerFile: + description: containerFile describes the custom data the user + has specified to build into the image. This is also commonly + called a Dockerfile and you can treat it as such. The content + is the content of your Dockerfile. + items: + description: MachineOSContainerfile contains all custom content + the user wants built into the image + properties: + containerfileArch: + default: noarch + description: 'containerfileArch describes the architecture + this containerfile is to be built for. This arch is optional. + If the user does not specify an architecture, it is assumed + that the content can be applied to all architectures, + or in a single arch cluster: the only architecture.' + enum: + - arm64 + - amd64 + - ppc64le + - s390x + - aarch64 + - x86_64 + - noarch + type: string + content: + description: content is the custom content to be built + type: string + required: + - content + type: object + maxItems: 7 + minItems: 0 + type: array + x-kubernetes-list-map-keys: + - containerfileArch + x-kubernetes-list-type: map + imageBuilder: + description: imageBuilder describes which image builder + will be used in each build triggered by this MachineOSConfig + properties: + imageBuilderType: + default: PodImageBuilder + description: 'imageBuilderType specifies the backend to be + used to build the image.
Valid options are: PodImageBuilder' + enum: + - PodImageBuilder + type: string + required: + - imageBuilderType + type: object + releaseVersion: + description: 'releaseVersion is associated with the base OS Image. + This is the version of OpenShift that the Base Image is associated + with. This field is populated from the machine-config-osimageurl + configmap in the openshift-machine-config-operator namespace. + It will come in the format: 4.16.0-0.nightly-2024-04-03-065948 + or any valid release. The MachineOSBuilder populates this field + and validates that this is a valid stream. This is used as a + label in the Dockerfile that builds the OS image.' + type: string + renderedImagePushSecret: + description: renderedImagePushSecret is the secret used to connect + to a user registry. The final image push and pull secrets should + be separate for security concerns. If the final image push secret + is somehow exfiltrated, that gives someone the power to push + images to the image repository. By comparison, if the final + image pull secret gets exfiltrated, that only gives someone + the ability to pull images from the image repository. It's basically the + principle of least privilege. This push secret will be used + only by the MachineConfigController pod to push the image to + the final destination. Not all nodes will need to push this + image, most of them will only need to pull the image in order + to use it. + properties: + name: + description: name is the name of the secret used to push or + pull this MachineOSConfig object. this secret must be in + the openshift-machine-config-operator namespace. + type: string + required: + - name + type: object + renderedImagePushspec: + description: 'renderedImagePushspec describes the location of + the final image. the MachineOSConfig object will use the in + cluster image registry configuration. if you wish to use a mirror + or any other settings specific to registries.conf, please specify + those in the cluster wide registries.conf. The format of the + image pushspec is: host[:port][/namespace]/name:<tag> or svc_name.namespace.svc[:port]/repository/name:<tag>' + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid :<tag>, + where '<digest>' is 64 characters long and '<tag>' is any + valid string. Or it must be a valid .svc followed by a port, + repository, image name, and tag. + rule: ((self.split(':').size() == 2 && self.split(':')[1].matches('^([a-zA-Z0-9-./:])+$')) + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. Or it must + be a valid .svc followed by a port, repository, image name, + and tag. + rule: ((self.split(':').size() == 2 && self.split(':')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$')) + required: + - baseImagePullSecret + - imageBuilder + - renderedImagePushSecret + - renderedImagePushspec + type: object + buildOutputs: + description: buildOutputs is where user input options for the build + output live + properties: + currentImagePullSecret: + description: currentImagePullSecret is the secret used to pull + the final produced image. Must live in the openshift-machine-config-operator + namespace. The final image push and pull secrets should be separate + for security concerns.
If the final image push secret is somehow + exfiltrated, that gives someone the power to push images to + the image repository. By comparison, if the final image pull + secret gets exfiltrated, that only gives someone the ability to pull images + from the image repository. It's basically the principle of least + privilege. This pull secret will be used on all nodes in the + pool. These nodes will need to pull the final OS image and boot + into it using rpm-ostree or bootc. + properties: + name: + description: name is the name of the secret used to push or + pull this MachineOSConfig object. this secret must be in + the openshift-machine-config-operator namespace. + type: string + required: + - name + type: object + type: object + machineConfigPool: + description: machineConfigPool is the pool which the build is for + properties: + name: + description: name of the MachineConfigPool object. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + required: + - buildInputs + - machineConfigPool + type: object + status: + description: status describes the status of the machineosconfig + properties: + conditions: + description: conditions are state related conditions for the config. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + currentImagePullspec: + description: currentImagePullspec is the fully qualified image pull + spec used by the MCO to pull down the new OSImage. This must include + the sha256 digest. + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:<digest>' + suffix, where '<digest>' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + observedGeneration: + description: observedGeneration represents the generation observed + by the controller. this field is updated when the user changes the + configuration in BuildSettings or the MCP this object is associated + with. + format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/install/0000_80_machine-config_01_pinnedimagesets-DevPreviewNoUpgrade.crd.yaml b/install/0000_80_machine-config_01_pinnedimagesets-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..d8c32dc062 --- /dev/null +++ b/install/0000_80_machine-config_01_pinnedimagesets-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,165 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1713 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: pinnedimagesets.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: PinnedImageSet + listKind: PinnedImageSetList + plural: pinnedimagesets + singular: pinnedimageset + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "PinnedImageSet describes a set of images that should be pinned + by CRI-O and pulled to the nodes which are members of the declared MachineConfigPools. + \n Compatibility level 4: No compatibility is provided, the API can change + at any point for any reason. These capabilities should not be used by applications + needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the configuration of this pinned image set. + properties: + pinnedImages: + description: "pinnedImages is a list of OCI Images referenced by digest + that should be pinned and pre-loaded by the nodes of a MachineConfigPool. + Translates into a new file inside the /etc/crio/crio.conf.d directory + with content similar to this: \n pinned_images = [ \"quay.io/openshift-release-dev/ocp-release@sha256:...\", + \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:...\", \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:...\", + ... ] \n These image references should all be by digest; tags aren't + allowed." + items: + properties: + name: + description: "name is an OCI Image referenced by digest. \n + The format of the image ref is: host[:port][/namespace]/name@sha256:<digest>" + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:<digest>' + suffix, where '<digest>' is 64 characters long + rule: self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$') + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$') + required: + - name + type: object + maxItems: 500 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - pinnedImages + type: object + status: + description: status describes the last observed state of this pinned image + set. + properties: + conditions: + description: conditions represent the observations of a pinned image + set's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon.
For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/pkg/controller/node/node_controller_test.go b/pkg/controller/node/node_controller_test.go index 142eb69b25..76c3b11060 100644 --- a/pkg/controller/node/node_controller_test.go +++ b/pkg/controller/node/node_controller_test.go @@ -27,7 +27,6 @@ import ( "k8s.io/client-go/tools/record" "github.com/davecgh/go-spew/spew" - apicfgv1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/api/config/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" fakeconfigv1client "github.com/openshift/client-go/config/clientset/versioned/fake" @@ -65,7 +64,6 @@ type fixture struct { schedulerClient *fakeconfigv1client.Clientset ccLister []*mcfgv1.ControllerConfig - mcLister []*mcfgv1.MachineConfig mcpLister []*mcfgv1.MachineConfigPool nodeLister []*corev1.Node @@ -75,7 +73,8 @@ type fixture struct { kubeobjects []runtime.Object objects []runtime.Object schedulerObjects []runtime.Object - schedulerLister []*apicfgv1.Scheduler + schedulerLister []*configv1.Scheduler + fgAccess featuregates.FeatureGateAccess } func newFixture(t *testing.T) *fixture { @@ -83,6 +82,12 @@ func newFixture(t *testing.T) *fixture { f.t = t f.objects = []runtime.Object{} f.kubeobjects = []runtime.Object{} + f.fgAccess = featuregates.NewHardcodedFeatureGateAccess( + []configv1.FeatureGateName{ + configv1.FeatureGatePinnedImages, + }, + []configv1.FeatureGateName{}, + ) return f } @@ -90,13 +95,12 @@ func (f *fixture) newControllerWithStopChan(stopCh <-chan struct{}) *Controller f.client = fake.NewSimpleClientset(f.objects...) f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...) f.schedulerClient = fakeconfigv1client.NewSimpleClientset(f.schedulerObjects...) 
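+	// The fixture now carries a hardcoded feature-gate accessor (f.fgAccess,
+	// built in newFixture with PinnedImages enabled); tests resolve concrete
+	// gate values from it via f.fgAccess.CurrentFeatureGates().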
- fgAccess := featuregates.NewHardcodedFeatureGateAccess(nil, nil) i := informers.NewSharedInformerFactory(f.client, noResyncPeriodFunc()) k8sI := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc()) ci := configv1informer.NewSharedInformerFactory(f.schedulerClient, noResyncPeriodFunc()) c := NewWithCustomUpdateDelay(i.Machineconfiguration().V1().ControllerConfigs(), i.Machineconfiguration().V1().MachineConfigs(), i.Machineconfiguration().V1().MachineConfigPools(), k8sI.Core().V1().Nodes(), - k8sI.Core().V1().Pods(), ci.Config().V1().Schedulers(), f.kubeclient, f.client, time.Millisecond, fgAccess) + k8sI.Core().V1().Pods(), ci.Config().V1().Schedulers(), f.kubeclient, f.client, time.Millisecond, f.fgAccess) c.ccListerSynced = alwaysReady c.mcpListerSynced = alwaysReady @@ -1200,7 +1204,11 @@ func TestShouldMakeProgress(t *testing.T) { } else { t.Logf("not expecting annotation") } - expStatus := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) + fg, err := f.fgAccess.CurrentFeatureGates() + if err != nil { + t.Fatal(err) + } + expStatus := calculateStatus(fg, []*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) expMcp := mcp.DeepCopy() expMcp.Status = expStatus f.expectUpdateMachineConfigPoolStatus(expMcp) @@ -1251,8 +1259,11 @@ func TestPaused(t *testing.T) { for idx := range nodes { f.kubeobjects = append(f.kubeobjects, nodes[idx]) } - - expStatus := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) + fg, err := f.fgAccess.CurrentFeatureGates() + if err != nil { + t.Fatal(err) + } + expStatus := calculateStatus(fg, []*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) expMcp := mcp.DeepCopy() expMcp.Status = expStatus f.expectUpdateMachineConfigPoolStatus(expMcp) @@ -1278,8 +1289,11 @@ func TestShouldUpdateStatusOnlyUpdated(t *testing.T) { for idx := range nodes { f.kubeobjects = append(f.kubeobjects, nodes[idx]) } - - expStatus := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) + fg, err := f.fgAccess.CurrentFeatureGates() + if err != nil { + t.Fatal(err) + } + expStatus := calculateStatus(fg, []*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) expMcp := mcp.DeepCopy() expMcp.Status = expStatus f.expectUpdateMachineConfigPoolStatus(expMcp) @@ -1306,8 +1320,11 @@ func TestShouldUpdateStatusOnlyNoProgress(t *testing.T) { for idx := range nodes { f.kubeobjects = append(f.kubeobjects, nodes[idx]) } - - expStatus := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) + fg, err := f.fgAccess.CurrentFeatureGates() + if err != nil { + t.Fatal(err) + } + expStatus := calculateStatus(fg, []*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) expMcp := mcp.DeepCopy() expMcp.Status = expStatus f.expectUpdateMachineConfigPoolStatus(expMcp) @@ -1339,8 +1356,12 @@ func TestCertStatus(t *testing.T) { for idx := range nodes { f.kubeobjects = append(f.kubeobjects, nodes[idx]) } + fg, err := f.fgAccess.CurrentFeatureGates() + if err != nil { + t.Fatal(err) + } - expStatus := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) + expStatus := calculateStatus(fg, []*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) expMcp := mcp.DeepCopy() expMcp.Status = expStatus @@ -1360,7 +1381,11 @@ func TestShouldDoNothing(t *testing.T) { newNodeWithLabel("node-0", machineConfigV1, machineConfigV1, map[string]string{"node-role/worker": "", "node-role/infra": ""}), newNodeWithLabel("node-1", machineConfigV1, machineConfigV1, map[string]string{"node-role/worker": "", "node-role/infra": ""}), } - status := 
calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) + fg, err := f.fgAccess.CurrentFeatureGates() + if err != nil { + t.Fatal(err) + } + status := calculateStatus(fg, []*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) mcp.Status = status f.ccLister = append(f.ccLister, cc) @@ -1451,7 +1476,11 @@ func TestControlPlaneTopology(t *testing.T) { for _, node := range nodes { addNodeAnnotations(node, annotations) } - status := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) + fg, err := f.fgAccess.CurrentFeatureGates() + if err != nil { + t.Fatal(err) + } + status := calculateStatus(fg, []*mcfgalphav1.MachineConfigNode{}, cc, mcp, nodes) mcp.Status = status f.ccLister = append(f.ccLister, cc) diff --git a/pkg/controller/node/status.go b/pkg/controller/node/status.go index 7975317e42..91445c7a67 100644 --- a/pkg/controller/node/status.go +++ b/pkg/controller/node/status.go @@ -6,6 +6,7 @@ import ( "strings" configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" helpers "github.com/openshift/machine-config-operator/pkg/helpers" @@ -53,7 +54,7 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { machineConfigStates = append(machineConfigStates, ms) } } - newStatus := calculateStatus(machineConfigStates, cc, pool, nodes) + newStatus := calculateStatus(fg, machineConfigStates, cc, pool, nodes) if equality.Semantic.DeepEqual(pool.Status, newStatus) { return nil } @@ -72,7 +73,7 @@ func (ctrl *Controller) syncStatusOnly(pool *mcfgv1.MachineConfigPool) error { } //nolint:gocyclo -func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.ControllerConfig, pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) mcfgv1.MachineConfigPoolStatus { +func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.ControllerConfig, pool *mcfgv1.MachineConfigPool, nodes []*corev1.Node) mcfgv1.MachineConfigPoolStatus { certExpirys := []mcfgv1.CertExpiry{} if cconfig != nil { for _, cert := range cconfig.Status.ControllerCertificates { @@ -234,8 +235,13 @@ func calculateStatus(mcs []*mcfgalphav1.MachineConfigNode, cconfig *mcfgv1.Contr // here we now set the MCP Degraded field, the node_controller is the one making the call right now // but we might have a dedicated controller or control loop somewhere else that understands how to // set Degraded. For now, the node_controller understands NodeDegraded & RenderDegraded = Degraded.
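+	// The PinnedImageSetsDegraded condition is gated on the PinnedImages
+	// feature gate: while the gate is off the condition is never set, so it
+	// must not feed the pool's overall Degraded state. Equivalent sketch of
+	// the check below, using only calls visible in this patch:
+	//
+	//	degraded := fg.Enabled(configv1.FeatureGatePinnedImages) &&
+	//		apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions,
+	//			mcfgv1.MachineConfigPoolPinnedImageSetsDegraded)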
+ + pinnedImageSetsDegraded := false + if fg.Enabled(configv1.FeatureGatePinnedImages) { + pinnedImageSetsDegraded = apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded) + } + renderDegraded := apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) - pinnedImageSetsDegraded := apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded) if nodeDegraded || renderDegraded || pinnedImageSetsDegraded { sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolDegraded, corev1.ConditionTrue, "", "") apihelpers.SetMachineConfigPoolCondition(&status, *sdegraded) diff --git a/pkg/controller/node/status_test.go b/pkg/controller/node/status_test.go index c56172c770..2a4b8dad87 100644 --- a/pkg/controller/node/status_test.go +++ b/pkg/controller/node/status_test.go @@ -7,7 +7,9 @@ import ( mcfgalphav1 "github.com/openshift/api/machineconfiguration/v1alpha1" + apicfgv1 "github.com/openshift/api/config/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" "github.com/openshift/machine-config-operator/pkg/apihelpers" daemonconsts "github.com/openshift/machine-config-operator/pkg/daemon/constants" "github.com/openshift/machine-config-operator/test/helpers" @@ -938,7 +940,18 @@ func TestCalculateStatus(t *testing.T) { Paused: test.paused, }, } - status := calculateStatus([]*mcfgalphav1.MachineConfigNode{}, nil, pool, test.nodes) + fgAccess := featuregates.NewHardcodedFeatureGateAccess( + []apicfgv1.FeatureGateName{ + apicfgv1.FeatureGateMachineConfigNodes, + apicfgv1.FeatureGatePinnedImages, + }, + []apicfgv1.FeatureGateName{}, + ) + fg, err := fgAccess.CurrentFeatureGates() + if err != nil { + t.Fatal(err) + } + status := calculateStatus(fg, []*mcfgalphav1.MachineConfigNode{}, nil, pool, test.nodes) test.verify(status, t) }) } diff --git a/pkg/controller/pinnedimageset/pinned_image_set.go b/pkg/controller/pinnedimageset/pinned_image_set.go index 36e5cf99bb..8615337786 100644 --- a/pkg/controller/pinnedimageset/pinned_image_set.go +++ b/pkg/controller/pinnedimageset/pinned_image_set.go @@ -44,7 +44,7 @@ const ( delay = 5 * time.Second ) -// Controller defines the render controller. +// Controller defines the pinned image set controller. type Controller struct { client mcfgclientset.Interface eventRecorder record.EventRecorder @@ -101,7 +101,7 @@ func New( return ctrl } -// Run executes the render controller. +// Run executes the pinned image set controller. 
func (ctrl *Controller) Run(workers int, stopCh <-chan struct{}) { defer utilruntime.HandleCrash() defer ctrl.queue.ShutDown() @@ -353,7 +353,7 @@ func (ctrl *Controller) syncMachineConfigPool(key string) error { } func (ctrl *Controller) syncAvailableStatus(pool *mcfgv1.MachineConfigPool) error { - if apihelpers.IsMachineConfigPoolConditionFalse(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded) { + if apihelpers.IsMachineConfigPoolConditionFalse(pool.Status.Conditions, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded) { return nil } sdegraded := apihelpers.NewMachineConfigPoolCondition(mcfgv1.MachineConfigPoolPinnedImageSetsDegraded, corev1.ConditionFalse, "", "") diff --git a/pkg/controller/pinnedimageset/pinned_image_set_test.go b/pkg/controller/pinnedimageset/pinned_image_set_test.go index eec5bf1ce0..c420ba0cf0 100644 --- a/pkg/controller/pinnedimageset/pinned_image_set_test.go +++ b/pkg/controller/pinnedimageset/pinned_image_set_test.go @@ -5,159 +5,90 @@ import ( "testing" "time" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - k8sfake "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" + "k8s.io/client-go/kubernetes/fake" - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" - "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake" - informers "github.com/openshift/client-go/machineconfiguration/informers/externalversions" - ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" + fakemco "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake" + mcfginformers "github.com/openshift/client-go/machineconfiguration/informers/externalversions" "github.com/openshift/machine-config-operator/test/helpers" "github.com/stretchr/testify/require" ) -var ( - alwaysReady = func() bool { return true } - noResyncPeriodFunc = func() time.Duration { return 0 } - masterPoolSelector = metav1.AddLabelToSelector(&metav1.LabelSelector{}, "machineconfiguration.openshift.io/role", "master") -) - -type fixture struct { - t *testing.T - - client *fake.Clientset - - mcpLister []*mcfgv1.MachineConfigPool - imageSetLister []*mcfgv1alpha1.PinnedImageSet - mcpIndexer cache.Indexer - - objects []runtime.Object -} - -func newFixture(t *testing.T) *fixture { - f := &fixture{} - f.t = t - f.objects = []runtime.Object{} - return f -} - -func (f *fixture) newController() *Controller { - f.client = fake.NewSimpleClientset(f.objects...) 
- - i := informers.NewSharedInformerFactory(f.client, noResyncPeriodFunc()) - - pisInformer := i.Machineconfiguration().V1alpha1().PinnedImageSets() - f.mcpIndexer = pisInformer.Informer().GetIndexer() - - c := New( - pisInformer, - i.Machineconfiguration().V1().MachineConfigPools(), - k8sfake.NewSimpleClientset(), - f.client, - ) - - c.mcpListerSynced = alwaysReady - c.mcpListerSynced = alwaysReady - c.imageSetSynced = alwaysReady - c.eventRecorder = ctrlcommon.NamespacedEventRecorder(&record.FakeRecorder{}) - - stopCh := make(chan struct{}) - defer close(stopCh) - i.Start(stopCh) - i.WaitForCacheSync(stopCh) - - for _, c := range f.mcpLister { - i.Machineconfiguration().V1().MachineConfigPools().Informer().GetIndexer().Add(c) - } - for _, p := range f.imageSetLister { - f.mcpIndexer.Add(p) - } - - return c -} - -func (f *fixture) updatePinnedImageSet(pis *mcfgv1alpha1.PinnedImageSet) error { - return f.mcpIndexer.Update(pis) -} - -func (f *fixture) deletePinnedImageSet(pis *mcfgv1alpha1.PinnedImageSet) error { - return f.mcpIndexer.Delete(pis) -} - -func TestUpdateSpecNewPinnedImageSet(t *testing.T) { +func TestSyncHandler(t *testing.T) { require := require.New(t) - f := newFixture(t) - mcp := helpers.NewMachineConfigPool("test-cluster-master", masterPoolSelector, helpers.MasterSelector, "") - pis := fakePinnedImageSet("test-pinned-image-set", "master", map[string]string{"machineconfiguration.openshift.io/role": "master"}) - - f.mcpLister = append(f.mcpLister, mcp) - f.objects = append(f.objects, mcp) - f.imageSetLister = append(f.imageSetLister, pis) - f.objects = append(f.objects, pis) - - c := f.newController() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() tests := []struct { - name string - pis *mcfgv1alpha1.PinnedImageSet - deletePis *mcfgv1alpha1.PinnedImageSet - wantSpecPis []mcfgv1.PinnedImageSetRef - wantErr error - wantImageErr error + name string + machineConfigPool runtime.Object + pis runtime.Object + pinnedImageSets []mcfgv1.PinnedImageSetRef + wantErr error }{ { - name: "happy path", - pis: pis, - wantSpecPis: []mcfgv1.PinnedImageSetRef{ - { - Name: pis.Name, - }, - }, + name: "happy path", + machineConfigPool: masterPool, + pis: masterPis, + pinnedImageSets: []mcfgv1.PinnedImageSetRef{{Name: masterPis.Name}}, }, { - name: "update labels to worker", - pis: fakePinnedImageSet("test-pinned-image-set", "master", map[string]string{"machineconfiguration.openshift.io/role": "worker"}), - wantSpecPis: nil, + name: "pool mismatch", + machineConfigPool: workerPool, + pis: masterPis, + pinnedImageSets: nil, }, { - name: "multiple labels single match", - pis: fakePinnedImageSet("test-pinned-image-set", "infr", map[string]string{ - "machineconfiguration.openshift.io/role": "master", - "test-label": "", - }), - wantSpecPis: []mcfgv1.PinnedImageSetRef{ - { - Name: pis.Name, - }, - }, + name: "no pinned image sets", + machineConfigPool: masterPool, + pis: nil, + pinnedImageSets: nil, + }, + { + name: "custom infra pool", + machineConfigPool: infraPool, + pis: infraPis, + pinnedImageSets: []mcfgv1.PinnedImageSetRef{{Name: infraPis.Name}}, }, { - name: "delete pinned image set", - deletePis: pis, - wantSpecPis: nil, + name: "custom infra pool also gets worker pinned image sets", + machineConfigPool: infraPool, + pis: workerPis, + pinnedImageSets: []mcfgv1.PinnedImageSetRef{{Name: workerPis.Name}}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if tt.deletePis != nil { - err := f.deletePinnedImageSet(tt.deletePis) - require.NoError(err) - 
} else { - err := f.updatePinnedImageSet(tt.pis) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fakeClient := fake.NewSimpleClientset() + fakeMCOClient := fakemco.NewSimpleClientset(tt.machineConfigPool) + sharedInformers := mcfginformers.NewSharedInformerFactory(fakeMCOClient, noResyncPeriodFunc()) + mcpInformer := sharedInformers.Machineconfiguration().V1().MachineConfigPools() + imageSetInformer := sharedInformers.Machineconfiguration().V1alpha1().PinnedImageSets() + + sharedInformers.Start(ctx.Done()) + sharedInformers.WaitForCacheSync(ctx.Done()) + + err := mcpInformer.Informer().GetIndexer().Add(tt.machineConfigPool) + require.NoError(err) + + if tt.pis != nil { + err = imageSetInformer.Informer().GetIndexer().Add(tt.pis) require.NoError(err) } - err := c.syncHandler(mcp.Name) + + c := New(imageSetInformer, mcpInformer, fakeClient, fakeMCOClient) + mcp, ok := tt.machineConfigPool.(*mcfgv1.MachineConfigPool) + require.True(ok) + + err = c.syncHandler(mcp.Name) require.NoError(err) mcpNew, err := c.client.MachineconfigurationV1().MachineConfigPools().Get(ctx, mcp.Name, metav1.GetOptions{}) require.NoError(err) - require.Equal(mcpNew.Spec.PinnedImageSets, tt.wantSpecPis) + require.Equal(mcpNew.Spec.PinnedImageSets, tt.pinnedImageSets) }) } } @@ -177,3 +108,24 @@ func fakePinnedImageSet(name, image string, labels map[string]string) *mcfgv1alp }, } } + +var ( + noResyncPeriodFunc = func() time.Duration { return 0 } + masterPoolSelector = metav1.AddLabelToSelector(&metav1.LabelSelector{}, "machineconfiguration.openshift.io/role", "master") + workerPoolSelector = metav1.AddLabelToSelector(&metav1.LabelSelector{}, "machineconfiguration.openshift.io/role", "worker") + masterPis = fakePinnedImageSet("master-set", "image1", map[string]string{"machineconfiguration.openshift.io/role": "master"}) + infraPis = fakePinnedImageSet("infra-set", "image1", map[string]string{"machineconfiguration.openshift.io/role": "infra"}) + workerPis = fakePinnedImageSet("worker-set", "image1", map[string]string{"machineconfiguration.openshift.io/role": "worker"}) + masterPool = helpers.NewMachineConfigPool("master", masterPoolSelector, helpers.MasterSelector, "") + workerPool = helpers.NewMachineConfigPool("worker", workerPoolSelector, helpers.WorkerSelector, "") + infraPool = helpers.NewMachineConfigPool("infra", infraPoolSelector, helpers.InfraSelector, "") + infraPoolSelector = &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "machineconfiguration.openshift.io/role", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"worker", "infra"}, + }, + }, + } +) diff --git a/pkg/daemon/cri/cri.go b/pkg/daemon/cri/cri.go index 3298dda90a..9ba72369c0 100644 --- a/pkg/daemon/cri/cri.go +++ b/pkg/daemon/cri/cri.go @@ -41,6 +41,8 @@ type Client struct { image runtimeapi.ImageServiceClient } +// PullImage pulls the image from the container runtime. The auth parameter can +// be nil if the image does not require authentication. 
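+//
+// A minimal usage sketch (the client value, context, and image reference
+// are illustrative assumptions, not part of this change):
+//
+//	// nil auth pulls anonymously; pass an AuthConfig for private registries
+//	if err := client.PullImage(ctx, "quay.io/example/app@sha256:...", nil); err != nil {
+//		return fmt.Errorf("prefetch failed: %w", err)
+//	}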
func (c *Client) PullImage(ctx context.Context, image string, auth *runtimeapi.AuthConfig) error { resp, err := c.image.PullImage(ctx, &runtimeapi.PullImageRequest{ Image: &runtimeapi.ImageSpec{ diff --git a/pkg/daemon/pinned_image_set.go b/pkg/daemon/pinned_image_set.go index 64bb116145..14993563dd 100644 --- a/pkg/daemon/pinned_image_set.go +++ b/pkg/daemon/pinned_image_set.go @@ -10,12 +10,16 @@ import ( "os" "os/exec" "reflect" + "sort" "strings" "sync" + "syscall" "time" "github.com/BurntSushi/toml" "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" ign3types "github.com/coreos/ignition/v2/config/v3_4/types" "github.com/golang/groupcache/lru" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -69,11 +73,10 @@ const ( ) var ( - errInsufficientStorage = errors.New("storage available is less than minimum required") - errFailedToPullImage = errors.New("failed to pull image") - errNotFound = errors.New("not found") - errNoAuthForImage = errors.New("no auth found for image") - errImageSetGenerationUpdated = errors.New("image set generation updated") + errInsufficientStorage = errors.New("storage available is less than minimum required") + errFailedToPullImage = errors.New("failed to pull image") + errNotFound = errors.New("not found") + errNoAuthForImage = errors.New("no auth found for image") ) // PinnedImageSetManager manages the prefetching of images. @@ -100,6 +103,8 @@ type PinnedImageSetManager struct { minStorageAvailableBytes resource.Quantity // path to the authfile authFilePath string + // path to the registry config file + registryCfgPath string // endpoint of the container runtime service runtimeEndpoint string // timeout for the prefetch operation @@ -108,7 +113,7 @@ type PinnedImageSetManager struct { backoff wait.Backoff // cache for reusable image information - cache *lru.Cache + cache *imageCache syncHandler func(string) error enqueueMachineConfigPool func(*mcfgv1.MachineConfigPool) @@ -129,8 +134,9 @@ func NewPinnedImageSetManager( nodeInformer coreinformersv1.NodeInformer, mcpInformer mcfginformersv1.MachineConfigPoolInformer, minStorageAvailableBytes resource.Quantity, - runtimeEndpoint string, - authFilePath string, + runtimeEndpoint, + authFilePath, + registryCfgPath string, prefetchTimeout time.Duration, featureGatesAccessor featuregates.FeatureGateAccess, ) *PinnedImageSetManager { @@ -139,6 +145,7 @@ func NewPinnedImageSetManager( mcfgClient: mcfgClient, runtimeEndpoint: runtimeEndpoint, authFilePath: authFilePath, + registryCfgPath: registryCfgPath, prefetchTimeout: prefetchTimeout, minStorageAvailableBytes: minStorageAvailableBytes, featureGatesAccessor: featureGatesAccessor, @@ -177,17 +184,11 @@ func NewPinnedImageSetManager( p.mcpLister = mcpInformer.Lister() p.mcpSynced = mcpInformer.Informer().HasSynced - p.cache = lru.New(256) + p.cache = newImageCache(256) return p } -type imageInfo struct { - Name string - Size int64 - Pulled bool -} - func (p *PinnedImageSetManager) sync(key string) error { klog.V(4).Infof("Syncing PinnedImageSet for pool: %s", key) node, err := p.nodeLister.Get(p.nodeName) @@ -212,9 +213,11 @@ func (p *PinnedImageSetManager) sync(key string) error { if errors.Is(err, context.DeadlineExceeded) { ctxErr := fmt.Errorf("requeue: prefetching images incomplete after: %v", p.prefetchTimeout) p.updateStatusError(pools, ctxErr) - klog.Error(ctxErr) + klog.Info(ctxErr) return ctxErr } + + klog.Errorf("Failed to prefetch and pin images: %v", 
err) p.updateStatusError(pools, err) return err } @@ -226,15 +229,6 @@ func (p *PinnedImageSetManager) sync(key string) error { func (p *PinnedImageSetManager) syncMachineConfigPools(ctx context.Context, pools []*mcfgv1.MachineConfigPool) error { images := make([]mcfgv1alpha1.PinnedImageRef, 0, 100) for _, pool := range pools { - validTarget, err := validateTargetPoolForNode(p.nodeName, p.nodeLister, pool) - if err != nil { - klog.Errorf("failed to validate pool %q: %v", pool.Name, err) - return err - } - if !validTarget { - continue - } - if err := p.syncMachineConfigPool(ctx, pool); err != nil { return err } @@ -253,9 +247,10 @@ func (p *PinnedImageSetManager) syncMachineConfigPools(ctx context.Context, pool } } - // verify all images are pulled and available if not clear the cache and requeue - for _, image := range images { - exists, err := p.criClient.ImageStatus(ctx, image.Name) + // verify all images are available; if not, clear the cache and requeue + imageNames := uniqueSortedImageNames(images) + for _, image := range imageNames { + exists, err := p.criClient.ImageStatus(ctx, image) if err != nil { return err } @@ -265,8 +260,9 @@ func (p *PinnedImageSetManager) syncMachineConfigPool(ctx context.Context, pool } } - // write config and reload crio last to allow a window for kubelet to gc images in emergency - if err := p.ensureCrioPinnedImagesConfigFile(images); err != nil { + // write config and reload crio last to allow a window for kubelet to gc + // images in an emergency + if err := ensureCrioPinnedImagesConfigFile(crioPinnedImagesDropInFilePath, imageNames); err != nil { klog.Errorf("failed to write crio config file: %v", err) return err } @@ -274,29 +270,6 @@ func (p *PinnedImageSetManager) syncMachineConfigPool(ctx context.Context, pool return nil } -// validateTargetPoolForNode checks if the node is a target for the given pool. -func validateTargetPoolForNode(nodeName string, nodeLister corev1lister.NodeLister, pool *mcfgv1.MachineConfigPool) (bool, error) { - if pool.Spec.PinnedImageSets == nil { - return false, nil - } - - selector, err := metav1.LabelSelectorAsSelector(pool.Spec.NodeSelector) - if err != nil { - return false, fmt.Errorf("invalid label selector: %w", err) - } - - nodes, err := nodeLister.List(selector) - if err != nil { - return false, err - } - - if !isNodeInPool(nodes, nodeName) { - return false, nil - } - - return true, nil -} - func (p *PinnedImageSetManager) syncMachineConfigPool(ctx context.Context, pool *mcfgv1.MachineConfigPool) error { if pool.Spec.PinnedImageSets == nil { return nil @@ -319,9 +292,28 @@ func (p *PinnedImageSetManager) syncMachineConfigPool(ctx context.Context, pool return p.prefetchImageSets(ctx, imageSets...)
} +func (p *PinnedImageSetManager) checkNodeAllocatableStorage(ctx context.Context, imageSet *mcfgv1alpha1.PinnedImageSet) error { + node, err := p.nodeLister.Get(p.nodeName) + if err != nil { + return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) + } + + storageCapacity, ok := node.Status.Allocatable[corev1.ResourceEphemeralStorage] + if !ok { + return fmt.Errorf("node %q has no ephemeral storage capacity", p.nodeName) + } + + capacity := storageCapacity.Value() + if storageCapacity.Cmp(p.minStorageAvailableBytes) < 0 { + return fmt.Errorf("%w capacity: %d, required: %d", errInsufficientStorage, capacity, p.minStorageAvailableBytes.Value()) + } + + return p.checkImagePayloadStorage(ctx, imageSet.Spec.PinnedImages, capacity) +} + // prefetchImageSets schedules the prefetching of images for the given image sets and waits for completion. func (p *PinnedImageSetManager) prefetchImageSets(ctx context.Context, imageSets ...*mcfgv1alpha1.PinnedImageSet) error { - registryAuth, err := newRegistryAuth(p.authFilePath) + registryAuth, err := newRegistryAuth(p.authFilePath, p.registryCfgPath) if err != nil { return err } @@ -329,40 +321,84 @@ // monitor prefetch operations monitor := newPrefetchMonitor() for _, imageSet := range imageSets { + // this is forbidden by the API validation rules, but we should check anyway + if len(imageSet.Spec.PinnedImages) == 0 { + continue + } + + cachedImage, ok := p.cache.Get(string(imageSet.UID)) + if ok { + cachedImageSet := cachedImage.(mcfgv1alpha1.PinnedImageSet) + if imageSet.Generation == cachedImageSet.Generation { + klog.V(4).Infof("Skipping prefetch for image set %q, generation %d already complete", imageSet.Name, imageSet.Generation) + continue + } + } if err := p.scheduleWork(ctx, p.prefetchCh, registryAuth, imageSet.Spec.PinnedImages, monitor); err != nil { return err } } - return monitor.WaitForDone() -} + if err := monitor.WaitForDone(); err != nil { + return err + } -func isNodeInPool(nodes []*corev1.Node, nodeName string) bool { - for _, n := range nodes { - if n.Name == nodeName { - return true - } + // cache the completed image sets + for _, imageSet := range imageSets { + imageSetCache := imageSet.DeepCopy() + imageSetCache.Spec.PinnedImages = nil + p.cache.Add(string(imageSet.UID), *imageSetCache) } - return false + + return nil } -func (p *PinnedImageSetManager) checkNodeAllocatableStorage(ctx context.Context, imageSet *mcfgv1alpha1.PinnedImageSet) error { - node, err := p.nodeLister.Get(p.nodeName) - if err != nil { - return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) +// scheduleWork schedules the prefetch work for the images and collects the first error encountered.
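+// Scheduling progress is logged in roughly 25% increments: updateIncrement
+// is totalImages/4, clamped to a minimum of 1 so that sets with fewer than
+// four images still produce at least one progress message.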
+func (p *PinnedImageSetManager) scheduleWork(ctx context.Context, prefetchCh chan prefetch, registryAuth *registryAuth, prefetchImages []mcfgv1alpha1.PinnedImageRef, monitor *prefetchMonitor) error {
+	totalImages := len(prefetchImages)
+	updateIncrement := totalImages / 4
+	if updateIncrement == 0 {
+		updateIncrement = 1 // Ensure there's at least one update if the image count is less than 4
 	}
+	scheduledImages := 0
+	for _, imageRef := range prefetchImages {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
+			image := imageRef.Name

-	storageCapacity, ok := node.Status.Capacity[corev1.ResourceEphemeralStorage]
-	if !ok {
-		return fmt.Errorf("node %q has no ephemeral storage capacity", p.nodeName)
-	}
+			// check cache if image is pulled
+			// this is an optimization to speed up prefetching after a requeue
+			if value, found := p.cache.Get(image); found {
+				imageInfo, ok := value.(imageInfo)
+				if ok {
+					if imageInfo.Pulled {
+						scheduledImages++
+						continue
+					}
+				}
+			}

-	capacity := storageCapacity.Value()
-	if storageCapacity.Cmp(p.minStorageAvailableBytes) < 0 {
-		return fmt.Errorf("%w capacity: %d, required: %d", errInsufficientStorage, capacity, p.minStorageAvailableBytes.Value())
-	}
+			authConfig, err := registryAuth.getAuthConfigForImage(image)
+			if err != nil {
+				return fmt.Errorf("failed to get auth config for image %s: %w", image, err)
+			}
+			monitor.Add(1)
+			prefetchCh <- prefetch{
+				image:   image,
+				auth:    authConfig,
+				monitor: monitor,
+			}

-	return p.checkImagePayloadStorage(ctx, imageSet.Spec.PinnedImages, capacity)
+			scheduledImages++
+			// report status every 25% of images scheduled
+			if scheduledImages%updateIncrement == 0 {
+				klog.Infof("Completed scheduling %d%% of images", (scheduledImages*100)/totalImages)
+			}
+		}
+	}
+	return nil
 }

 func (p *PinnedImageSetManager) checkImagePayloadStorage(ctx context.Context, images []mcfgv1alpha1.PinnedImageRef, capacity int64) error {
@@ -421,59 +457,43 @@ func (p *PinnedImageSetManager) checkImagePayloadStorage(ctx context.Context, im
 	return nil
 }

-func isImageSetInPool(imageSet string, pool *mcfgv1.MachineConfigPool) bool {
-	for _, set := range pool.Spec.PinnedImageSets {
-		if set.Name == imageSet {
-			return true
-		}
-	}
-	return false
-}
-
-// ensureCrioPinnedImagesConfigFile creates the crio config file which populates pinned_images with a unique list of images then reloads crio.
-// If the list is empty, and a config exists on disk the file is removed and crio is reloaded.
-func (p *PinnedImageSetManager) ensureCrioPinnedImagesConfigFile(images []mcfgv1alpha1.PinnedImageRef) error {
-	hasCrioConfigFile, err := p.hasPinnedImagesConfigFile()
+// ensureCrioPinnedImagesConfigFile ensures the crio config file is up to date with the pinned images.
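+// The rendered TOML is compared against the config currently on disk and
+// CRI-O is only reloaded when the content differs; an empty image list
+// removes the drop-in file before reloading.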
+func ensureCrioPinnedImagesConfigFile(path string, imageNames []string) error {
+	cfgExists, err := hasConfigFile(path)
 	if err != nil {
 		return fmt.Errorf("failed to check crio config file: %w", err)
 	}

-	// if there are no images and the config file exists, remove it
-	if len(images) == 0 {
-		if hasCrioConfigFile {
-			// remove the crio config file and reload crio
-			if err := p.deleteCrioConfigFile(); err != nil {
-				return fmt.Errorf("failed to remove crio config file: %w", err)
-			}
-			return p.crioReload()
+	if cfgExists && len(imageNames) == 0 {
+		if err := deleteCrioConfigFile(); err != nil {
+			return fmt.Errorf("failed to remove CRI-O config file: %w", err)
 		}
-		return nil
+		return crioReload()
 	}

-	// ensure list elements are unique
-	// assume the number of duplicates is small so preallocate makes sense
-	// for larger lists
-	seen := make(map[string]struct{}, len(images))
-	unique := make([]string, 0, len(images))
-	for _, image := range images {
-		if _, ok := seen[image.Name]; !ok {
-			seen[image.Name] = struct{}{}
-			unique = append(unique, image.Name)
+	var existingCfgBytes []byte
+	if cfgExists {
+		existingCfgBytes, err = os.ReadFile(path)
+		if err != nil {
+			return fmt.Errorf("failed to read CRI-O config file: %w", err)
 		}
 	}

-	// create ignition file for crio drop-in config. this is done to use the existing
-	// atomic writer functionality.
-	ignFile, err := createCrioConfigIgnitionFile(unique)
+	newCfgBytes, err := createCrioConfigFileBytes(imageNames)
 	if err != nil {
 		return fmt.Errorf("failed to create crio config ignition file: %w", err)
 	}

-	skipCertificateWrite := true
-	if err := writeFiles([]ign3types.File{ignFile}, skipCertificateWrite); err != nil {
-		return fmt.Errorf("failed to write crio config file: %w", err)
+	// write the config and reload CRI-O only when the rendered config differs
+	// from what is currently on disk
+	if !bytes.Equal(bytes.TrimSpace(existingCfgBytes), bytes.TrimSpace(newCfgBytes)) {
+		ignFile := ctrlcommon.NewIgnFileBytes(path, newCfgBytes)
+		if err := writeFiles([]ign3types.File{ignFile}, true); err != nil {
+			return fmt.Errorf("failed to write CRI-O config file: %w", err)
+		}
+		return crioReload()
 	}
-	return p.crioReload()
+
+	return nil
 }

 func (p *PinnedImageSetManager) updateStatusProgressingComplete(pools []*mcfgv1.MachineConfigPool, message string) {
@@ -483,7 +503,7 @@ func (p *PinnedImageSetManager) updateStatusProgressingComplete(pools []*mcfgv1.
 		return
 	}

-	applyCfg, imageSets, err := getImageSetApplyConfigs(p.imageSetLister, pools, nil)
+	applyCfg, imageSets, err := getImageSetMetadata(p.imageSetLister, pools, nil)
 	if err != nil {
 		klog.Errorf("Failed to get image set apply configs: %v", err)
 		return
@@ -536,7 +556,7 @@ func (p *PinnedImageSetManager) updateStatusError(pools []*mcfgv1.MachineConfigP
 		return
 	}

-	applyCfg, imageSets, err := getImageSetApplyConfigs(p.imageSetLister, pools, nil)
+	applyCfg, imageSets, err := getImageSetMetadata(p.imageSetLister, pools, nil)
 	if err != nil {
 		klog.Errorf("Failed to get image set apply configs: %v", err)
 		return
@@ -561,7 +581,7 @@ func (p *PinnedImageSetManager) updateStatusError(pools []*mcfgv1.MachineConfigP
 		klog.Errorf("Failed to updated machine config node: %v", err)
 	}

-	applyCfg, imageSets, err = getImageSetApplyConfigs(p.imageSetLister, pools, statusErr)
+	applyCfg, imageSets, err = getImageSetMetadata(p.imageSetLister, pools, statusErr)
 	if err != nil {
 		klog.Errorf("Failed to get image set apply configs: %v", err)
 		return
@@ -587,8 +607,8 @@ func (p *PinnedImageSetManager) updateStatusError(pools []*mcfgv1.MachineConfigP
 	}
 }

-// getImageSetApplyConfigs populates image set generation details for status updates to MachineConfigNode.
-func getImageSetApplyConfigs(imageSetLister mcfglistersv1alpha1.PinnedImageSetLister, pools []*mcfgv1.MachineConfigPool, statusErr error) ([]*machineconfigurationalphav1.MachineConfigNodeStatusPinnedImageSetApplyConfiguration, []mcfgv1alpha1.MachineConfigNodeSpecPinnedImageSet, error) {
+// getImageSetMetadata populates observed pinned image set generation details for node level status updates to MachineConfigNode.
+func getImageSetMetadata(imageSetLister mcfglistersv1alpha1.PinnedImageSetLister, pools []*mcfgv1.MachineConfigPool, statusErr error) ([]*machineconfigurationalphav1.MachineConfigNodeStatusPinnedImageSetApplyConfiguration, []mcfgv1alpha1.MachineConfigNodeSpecPinnedImageSet, error) {
 	var mcnImageSets []mcfgv1alpha1.MachineConfigNodeSpecPinnedImageSet
 	var imageSetApplyConfigs []*machineconfigurationalphav1.MachineConfigNodeStatusPinnedImageSetApplyConfiguration
 	for _, pool := range pools {
@@ -643,6 +663,7 @@ func (p *PinnedImageSetManager) getWorkerCount() (int, error) {
 func (p *PinnedImageSetManager) resetWorkload(cancelFn context.CancelFunc) {
 	p.mu.Lock()
 	if p.cancelFn != nil {
+		klog.V(4).Info("Reset workload")
 		p.cancelFn()
 	}
 	p.cancelFn = cancelFn
@@ -650,9 +671,9 @@ func (p *PinnedImageSetManager) resetWorkload(cancelFn context.CancelFunc) {
 }

 func (p *PinnedImageSetManager) cancelWorkload(reason string) {
-	klog.Infof("Cancelling workload: %s", reason)
 	p.mu.Lock()
 	if p.cancelFn != nil {
+		klog.Infof("Cancelling workload: %s", reason)
 		p.cancelFn()
 		p.cancelFn = nil
 	}
@@ -670,7 +691,12 @@ func (p *PinnedImageSetManager) prefetchWorker(ctx context.Context) {

 		cachedImage, ok := p.cache.Get(task.image)
 		if ok {
-			imageInfo := cachedImage.(imageInfo)
+			imageInfo, ok := cachedImage.(imageInfo)
+			if !ok {
+				klog.Warningf("corrupted cache entry for image %q, deleting", task.image)
+				p.cache.Remove(task.image)
+				continue
+			}
 			imageInfo.Pulled = true
 			p.cache.Add(task.image, imageInfo)
 		} else {
@@ -686,143 +711,6 @@ func (p *PinnedImageSetManager) prefetchWorker(ctx context.Context) {
 		}
 	}
 }

-// scheduleWork schedules the prefetch work for the images and collects the first error encountered.
-func (p *PinnedImageSetManager) scheduleWork(ctx context.Context, prefetchCh chan prefetch, registryAuth *registryAuth, prefetchImages []mcfgv1alpha1.PinnedImageRef, monitor *prefetchMonitor) error {
-	totalImages := len(prefetchImages)
-	updateIncrement := totalImages / 4
-	if updateIncrement == 0 {
-		updateIncrement = 1 // Ensure there's at least one update if the image count is less than 4
-	}
-	scheduledImages := 0
-	for _, imageRef := range prefetchImages {
-		select {
-		case <-ctx.Done():
-			return ctx.Err()
-		default:
-			image := imageRef.Name
-
-			// check cache if image is pulled
-			// this is an optimization to speedup prefetching after requeue
-			if value, found := p.cache.Get(image); found {
-				imageInfo, ok := value.(imageInfo)
-				if ok {
-					if imageInfo.Pulled {
-						scheduledImages++
-						if scheduledImages%updateIncrement == 0 {
-							klog.Infof("Completed scheduling %d%% of images", (scheduledImages*100)/totalImages)
-						}
-						continue
-					}
-				}
-			}
-
-			authConfig, err := registryAuth.getAuthConfigForImage(image)
-			if err != nil {
-				return fmt.Errorf("failed to get auth config for image %s: %w", image, err)
-			}
-			monitor.Add(1)
-			prefetchCh <- prefetch{
-				image:   image,
-				auth:    authConfig,
-				monitor: monitor,
-			}
-
-			scheduledImages++
-			// report status every 25% of images scheduled
-			if scheduledImages%updateIncrement == 0 {
-				klog.Infof("Completed scheduling %d%% of images", (scheduledImages*100)/totalImages)
-			}
-		}
-	}
-	return nil
-}
-
-// ensurePullImage first checks if the image exists locally and then will attempt to pull
-// the image from the container runtime with a retry/backoff.
-func ensurePullImage(ctx context.Context, client *cri.Client, backoff wait.Backoff, image string, authConfig *runtimeapi.AuthConfig) error {
-	exists, err := client.ImageStatus(ctx, image)
-	if err != nil {
-		return err
-	}
-	if exists {
-		klog.V(4).Infof("image %q already exists", image)
-		return nil
-	}
-
-	var lastErr error
-	tries := 0
-	err = wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) {
-		tries++
-		err := client.PullImage(ctx, image, authConfig)
-		if err != nil {
-			lastErr = err
-			return false, nil
-		}
-		return true, nil
-	})
-	// this is only an error if ctx has error or backoff limits are exceeded
-	if err != nil {
-		return fmt.Errorf("%w %q (%d tries): %w: %w", errFailedToPullImage, image, tries, err, lastErr)
-	}
-
-	// successful pull
-	klog.V(4).Infof("image %q pulled", image)
-	return nil
-}
-
-func (p *PinnedImageSetManager) crioReload() error {
-	serviceName := constants.CRIOServiceName
-	if err := reloadService(serviceName); err != nil {
-		return fmt.Errorf("could not apply update: reloading %s configuration failed. Error: %w", serviceName, err)
-	}
-	return nil
-}
-
-func (p *PinnedImageSetManager) deleteCrioConfigFile() error {
-	// remove the crio config file
-	if err := os.Remove(crioPinnedImagesDropInFilePath); err != nil {
-		return fmt.Errorf("failed to remove crio config file: %w", err)
-	}
-	klog.Infof("removed crio config file: %s", crioPinnedImagesDropInFilePath)
-
-	return nil
-}
-
-func (p *PinnedImageSetManager) hasPinnedImagesConfigFile() (bool, error) {
-	_, err := os.Stat(crioPinnedImagesDropInFilePath)
-	if os.IsNotExist(err) {
-		return false, nil
-	}
-	if err != nil {
-		return false, fmt.Errorf("failed to check crio config file: %w", err)
-	}
-
-	return true, nil
-}
-
-// createCrioConfigIgnitionFile creates an ignition file for the pinned-image crio config.
-func createCrioConfigIgnitionFile(images []string) (ign3types.File, error) {
-	type tomlConfig struct {
-		Crio struct {
-			Image struct {
-				PinnedImages []string `toml:"pinned_images,omitempty"`
-			} `toml:"image"`
-		} `toml:"crio"`
-	}
-
-	tomlConf := tomlConfig{}
-	tomlConf.Crio.Image.PinnedImages = images
-
-	// TODO: custom encoder that can write each image on newline
-	var buf bytes.Buffer
-	encoder := toml.NewEncoder(&buf)
-	if err := encoder.Encode(tomlConf); err != nil {
-		return ign3types.File{}, fmt.Errorf("failed to encode toml: %w", err)
-	}
-
-	return ctrlcommon.NewIgnFileBytes(crioPinnedImagesDropInFilePath, buf.Bytes()), nil
-}
-
 func (p *PinnedImageSetManager) Run(workers int, stopCh <-chan struct{}) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer func() {
@@ -1052,12 +940,12 @@ func (p *PinnedImageSetManager) deleteMachineConfigPool(obj interface{}) {
 		return
 	}

-	if err := p.deleteCrioConfigFile(); err != nil {
+	if err := deleteCrioConfigFile(); err != nil {
 		klog.Errorf("failed to delete crio config file: %v", err)
 		return
 	}

-	p.crioReload()
+	crioReload()
 }

 func (p *PinnedImageSetManager) enqueue(pool *mcfgv1.MachineConfigPool) {
@@ -1106,6 +994,184 @@ func (p *PinnedImageSetManager) handleErr(err error, key interface{}) {
 	p.queue.AddAfter(key, 1*time.Minute)
 }

+// ensurePullImage first checks if the image exists locally and then attempts to pull
+// the image from the container runtime with a retry/backoff.
+func ensurePullImage(ctx context.Context, client *cri.Client, backoff wait.Backoff, image string, authConfig *runtimeapi.AuthConfig) error {
+	exists, err := client.ImageStatus(ctx, image)
+	if err != nil {
+		return err
+	}
+	if exists {
+		klog.V(4).Infof("image %q already exists", image)
+		return nil
+	}
+
+	var lastErr error
+	tries := 0
+	err = wait.ExponentialBackoffWithContext(ctx, backoff, func(ctx context.Context) (bool, error) {
+		tries++
+		err := client.PullImage(ctx, image, authConfig)
+		if err != nil {
+			lastErr = err
+			// fail fast if out of space
+			if isErrNoSpace(err) {
+				return false, err
+			}
+			return false, nil
+		}
+		return true, nil
+	})
+	// this is only an error if the context has an error or backoff limits are exceeded
+	if err != nil {
+		return fmt.Errorf("%w %q (%d tries): %w: %w", errFailedToPullImage, image, tries, err, lastErr)
+	}
+
+	// successful pull
+	klog.V(4).Infof("image %q pulled", image)
+	return nil
+}
+
+func isErrNoSpace(err error) bool {
+	if err == syscall.ENOSPC {
+		return true
+	}
+	if werr, ok := err.(*os.PathError); ok {
+		if werr.Err == syscall.ENOSPC {
+			return true
+		}
+	}
+	return false
+}
+
+func uniqueSortedImageNames(images []mcfgv1alpha1.PinnedImageRef) []string {
+	seen := make(map[string]struct{})
+	var unique []string

+	for _, image := range images {
+		if _, ok := seen[image.Name]; !ok {
+			seen[image.Name] = struct{}{}
+			unique = append(unique, image.Name)
+		}
+	}
+
+	sort.Strings(unique)
+
+	return unique
+}
+
+func crioReload() error {
+	serviceName := constants.CRIOServiceName
+	if err := reloadService(serviceName); err != nil {
+		return fmt.Errorf("could not apply update: reloading %s configuration failed. Error: %w", serviceName, err)
+	}
+	return nil
+}
+
+func deleteCrioConfigFile() error {
+	// remove the crio config file
+	if err := os.Remove(crioPinnedImagesDropInFilePath); err != nil {
+		return fmt.Errorf("failed to remove crio config file: %w", err)
+	}
+	klog.Infof("removed crio config file: %s", crioPinnedImagesDropInFilePath)
+
+	return nil
+}
+
+func hasConfigFile(path string) (bool, error) {
+	_, err := os.Stat(path)
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	if err != nil {
+		return false, fmt.Errorf("failed to check crio config file: %w", err)
+	}
+
+	return true, nil
+}
+
+// createCrioConfigFileBytes creates a crio config file with the pinned images.
+func createCrioConfigFileBytes(images []string) ([]byte, error) {
+	type tomlConfig struct {
+		Crio struct {
+			Image struct {
+				PinnedImages []string `toml:"pinned_images,omitempty"`
+			} `toml:"image"`
+		} `toml:"crio"`
+	}
+
+	tomlConf := tomlConfig{}
+	tomlConf.Crio.Image.PinnedImages = images
+
+	var buf bytes.Buffer
+	encoder := toml.NewEncoder(&buf)
+	if err := encoder.Encode(tomlConf); err != nil {
+		return nil, fmt.Errorf("failed to encode toml: %w", err)
+	}
+
+	return buf.Bytes(), nil
+}
+
+func isImageSetInPool(imageSet string, pool *mcfgv1.MachineConfigPool) bool {
+	for _, set := range pool.Spec.PinnedImageSets {
+		if set.Name == imageSet {
+			return true
+		}
+	}
+	return false
+}
+
+func isNodeInPool(nodes []*corev1.Node, nodeName string) bool {
+	for _, n := range nodes {
+		if n.Name == nodeName {
+			return true
+		}
+	}
+	return false
+}
+
+// imageCache is a thread-safe cache for storing image information.
+type imageCache struct {
+	mu    sync.RWMutex
+	cache *lru.Cache
+}
+
+func newImageCache(maxEntries int) *imageCache {
+	return &imageCache{
+		cache: lru.New(maxEntries),
+	}
+}
+
+func (c *imageCache) Add(key, value interface{}) {
+	c.mu.Lock()
+	c.cache.Add(key, value)
+	c.mu.Unlock()
+}
+
+func (c *imageCache) Get(key interface{}) (value interface{}, ok bool) {
+	// lru.Cache.Get mutates the recency list, so it needs the write lock
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.cache.Get(key)
+}
+
+func (c *imageCache) Clear() {
+	c.mu.Lock()
+	c.cache.Clear()
+	c.mu.Unlock()
+}
+
+func (c *imageCache) Remove(key interface{}) {
+	c.mu.Lock()
+	c.cache.Remove(key)
+	c.mu.Unlock()
+}
+
+// imageInfo is used to store image information in the cache.
+type imageInfo struct {
+	Name   string
+	Size   int64
+	Pulled bool
+}
+
 func getImageSize(ctx context.Context, imageName, authFilePath string) (int64, error) {
 	args := []string{
 		"manifest",
@@ -1151,20 +1217,21 @@ func triggerMachineConfigPoolChange(old, new *mcfgv1.MachineConfigPool) bool {
 	if old.DeletionTimestamp != new.DeletionTimestamp {
 		return true
 	}
-	if !reflect.DeepEqual(old.Spec, new.Spec) {
+	if !reflect.DeepEqual(old.Spec.PinnedImageSets, new.Spec.PinnedImageSets) {
 		return true
 	}
 	return false
 }

-// registryAuth manages a map of registry to auth config.
+// registryAuth manages the registry authentication for pulling images.
 type registryAuth struct {
-	mu   sync.Mutex
+	mu   sync.RWMutex
 	auth map[string]*runtimeapi.AuthConfig
+	reg  map[string]*sysregistriesv2.Registry
 }

-func newRegistryAuth(authfile string) (*registryAuth, error) {
-	data, err := os.ReadFile(authfile)
+func newRegistryAuth(authFilePath, registryCfgPath string) (*registryAuth, error) {
+	data, err := os.ReadFile(authFilePath)
 	if err != nil {
 		return nil, fmt.Errorf("failed to read authfile: %w", err)
 	}
@@ -1201,23 +1268,67 @@ func newRegistryAuth(authfile string) (*registryAuth, error) {
 		}
 	}

-	return &registryAuth{auth: authMap}, nil
+	sys := &types.SystemContext{
+		SystemRegistriesConfPath: registryCfgPath,
+	}
+	regs, err := sysregistriesv2.GetRegistries(sys)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get registries: %w", err)
+	}
+
+	regMap := make(map[string]*sysregistriesv2.Registry, len(regs))
+	for i := range regs {
+		// index into the slice rather than taking the address of the loop variable
+		regMap[regs[i].Prefix] = &regs[i]
+	}
+
+	return &registryAuth{auth: authMap, reg: regMap}, nil
 }

-func (r *registryAuth) getAuthConfigForImage(image string) (*runtimeapi.AuthConfig, error) {
-	r.mu.Lock()
-	defer r.mu.Unlock()
+func (r *registryAuth) getAuth(domain string) *runtimeapi.AuthConfig {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	return r.auth[domain]
+}
+
+func (r *registryAuth) getRegistry(prefix string) *sysregistriesv2.Registry {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	return r.reg[prefix]
+}

+func (r *registryAuth) getAuthConfigForImage(image string) (*runtimeapi.AuthConfig, error) {
 	parsed, err := reference.ParseNormalizedNamed(strings.TrimSpace(image))
 	if err != nil {
 		return nil, err
 	}

-	auth, ok := r.auth[reference.Domain(parsed)]
-	if !ok {
-		return nil, fmt.Errorf("%w: %q", errNoAuthForImage, image)
+	// check if the image has a mirror; if one is found, match the source to existing auth if possible
+	parts := strings.Split(image, "@")
+	registry := r.getRegistry(parts[0])
+	if registry != nil {
+		sources, err := registry.PullSourcesFromReference(parsed)
+		if err != nil {
+			return nil, err
+		}
+		for _, source := range sources {
+			pref, err := reference.ParseNamed(source.Endpoint.Location)
+			if err != nil {
+				return nil, err
+			}
+			if auth := r.getAuth(reference.Domain(pref)); auth != nil {
+				return auth, nil
+			}
+		}
+	}
+
+	if auth := r.getAuth(reference.Domain(parsed)); auth != nil {
+		return auth, nil
 	}
-
-	return auth, nil
+
+	// no auth found for the image; this is not an error for public images
+	klog.V(4).Infof("no auth found for image: %s", image)
+	return nil, nil
 }

 // prefetch represents a task to prefetch an image.
diff --git a/pkg/daemon/pinned_image_set_test.go b/pkg/daemon/pinned_image_set_test.go index 1beba13584..f5082fa857 100644 --- a/pkg/daemon/pinned_image_set_test.go +++ b/pkg/daemon/pinned_image_set_test.go @@ -2,62 +2,72 @@ package daemon import ( "context" - "flag" "fmt" "net" "os" "path/filepath" + "strings" "testing" "time" - "github.com/golang/groupcache/lru" - mcfgv1 "github.com/openshift/api/machineconfiguration/v1" - mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" - fakemco "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake" - mcfginformers "github.com/openshift/client-go/machineconfiguration/informers/externalversions" - "github.com/openshift/machine-config-operator/pkg/daemon/cri" "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/util/workqueue" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" - v1 "k8s.io/cri-api/pkg/apis/runtime/v1" apitest "k8s.io/cri-api/pkg/apis/testing" - "k8s.io/klog/v2" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + mcfgv1 "github.com/openshift/api/machineconfiguration/v1" + mcfgv1alpha1 "github.com/openshift/api/machineconfiguration/v1alpha1" + fakemco "github.com/openshift/client-go/machineconfiguration/clientset/versioned/fake" + mcfginformers "github.com/openshift/client-go/machineconfiguration/informers/externalversions" + "github.com/openshift/machine-config-operator/pkg/daemon/cri" ) -func TestMain(m *testing.M) { - klog.InitFlags(nil) - flag.Set("logtostderr", "true") - flag.Set("v", "2") - flag.Parse() - os.Exit(m.Run()) -} - -const authFileContents = `{"auths":{ +const ( + authFileContents = `{"auths":{ "quay.io": {"auth": "Zm9vOmJhcg=="}, - "quay.io:443": {"auth": "Zm9vOmJheg=="} + "quay.io:443": {"auth": "Zm9vOmJheg=="}, + "registry.ci.openshift.org": {"auth": "Zm9vOnphcg=="} }}` + registryConfig = `unqualified-search-registries = ["registry.access.redhat.com", "docker.io"] +short-name-mode = "" + +[[registry]] + prefix = "" + location = "example.io/digest-example/release" + blocked = true + + [[registry.mirror]] + location = "registry.ci.openshift.org/ocp/release" + pull-from-mirror = "digest-only"` +) + func TestRegistryAuth(t *testing.T) { require := require.New(t) - authFilePath := filepath.Join(t.TempDir(), "auth.json") - bogusFilePath := filepath.Join(t.TempDir(), "bogus.json") + tmpDir := t.TempDir() + + authFilePath := filepath.Join(tmpDir, "auth.json") + bogusFilePath := filepath.Join(tmpDir, "bogus.json") err := os.WriteFile(authFilePath, []byte(authFileContents), 0644) require.NoError(err) + registryConfigPath := filepath.Join(tmpDir, "registries.conf") + err = os.WriteFile(registryConfigPath, []byte(registryConfig), 0644) + require.NoError(err) tests := []struct { name string authFile string image string - wantAuth v1.AuthConfig + wantAuth *runtimeapi.AuthConfig wantErr error wantImageErr error }{ @@ -70,7 +80,7 @@ func TestRegistryAuth(t *testing.T) { name: "valid auth file", authFile: authFilePath, image: "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6134ca5fbcf8e4007de2d319758dcd7b4e5ded97a00c78a7ff490c1f56579d49", - wantAuth: v1.AuthConfig{ + wantAuth: 
&runtimeapi.AuthConfig{ Username: "foo", Password: "bar", }, @@ -79,21 +89,30 @@ func TestRegistryAuth(t *testing.T) { name: "valid auth file with port", authFile: authFilePath, image: "quay.io:443/openshift-release-dev/ocp-v4.0-art-dev@sha256:6134ca5fbcf8e4007de2d319758dcd7b4e5ded97a00c78a7ff490c1f56579d49", - wantAuth: v1.AuthConfig{ + wantAuth: &runtimeapi.AuthConfig{ Username: "foo", Password: "baz", }, }, { - name: "no valid registry auth for image", - authFile: authFilePath, - image: "docker.io/no/auth", - wantImageErr: errNoAuthForImage, + name: "no valid registry auth for image", + authFile: authFilePath, + image: "docker.io/no/auth", + wantAuth: nil, + }, + { + name: "mirrored registry send auth for mirror", + authFile: authFilePath, + image: "example.io/digest-example/release@sha256:0704fcca246287435294dcf12b21825689c899c2365b6fc8d089d80efbae742a", + wantAuth: &runtimeapi.AuthConfig{ + Username: "foo", + Password: "zar", + }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - registry, err := newRegistryAuth(tt.authFile) + registry, err := newRegistryAuth(tt.authFile, registryConfigPath) if tt.wantErr != nil { require.ErrorIs(err, tt.wantErr) return @@ -105,92 +124,7 @@ func TestRegistryAuth(t *testing.T) { return } require.NoError(err) - require.NotNil(cfg) - require.Equal(tt.wantAuth, *cfg) - }) - } -} - -func TestEnsurePinnedImageSetForNode(t *testing.T) { - require := require.New(t) - tests := []struct { - name string - nodes []runtime.Object - machineConfigPools []runtime.Object - wantValidTarget bool - wantErr error - wantNotFound bool - }{ - { - name: "happy path", - machineConfigPools: []runtime.Object{ - fakeWorkerPoolPinnedImageSets, - }, - nodes: []runtime.Object{ - fakeStableStorageWorkerNode, - }, - wantValidTarget: true, - }, - { - name: "pool has no pinned image sets", - machineConfigPools: []runtime.Object{ - fakeWorkerPoolNoPinnedImageSets, - }, - nodes: []runtime.Object{ - fakeStableStorageWorkerNode, - }, - wantValidTarget: true, - }, - { - name: "no matching pools for node", - machineConfigPools: []runtime.Object{ - fakeMasterPoolPinnedImageSets, - }, - nodes: []runtime.Object{ - fakeStableStorageWorkerNode, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - fakeMCOClient := fakemco.NewSimpleClientset(tt.machineConfigPools...) - fakeClient := fake.NewSimpleClientset(tt.nodes...) 
- sharedInformers := mcfginformers.NewSharedInformerFactory(fakeMCOClient, noResyncPeriodFunc()) - informerFactory := informers.NewSharedInformerFactory(fakeClient, noResyncPeriodFunc()) - mcpInformer := sharedInformers.Machineconfiguration().V1().MachineConfigPools() - nodeInformer := informerFactory.Core().V1().Nodes() - - informerFactory.Start(ctx.Done()) - informerFactory.WaitForCacheSync(ctx.Done()) - sharedInformers.Start(ctx.Done()) - sharedInformers.WaitForCacheSync(ctx.Done()) - - for _, mcp := range tt.machineConfigPools { - err := mcpInformer.Informer().GetIndexer().Add(mcp) - require.NoError(err) - } - for _, node := range tt.nodes { - err := nodeInformer.Informer().GetIndexer().Add(node) - require.NoError(err) - } - - var localNodeName string - if len(tt.nodes) == 0 { - localNodeName = "bogus" - } else { - localNode, ok := tt.nodes[0].(*corev1.Node) - require.True(ok) - localNodeName = localNode.Name - } - - pool := tt.machineConfigPools[0].(*mcfgv1.MachineConfigPool) - validTarget, err := validateTargetPoolForNode(localNodeName, nodeInformer.Lister(), pool) - require.NoError(err) - require.Equal(tt.wantValidTarget, validTarget) - + require.Equal(tt.wantAuth, cfg) }) } } @@ -198,10 +132,14 @@ func TestEnsurePinnedImageSetForNode(t *testing.T) { func TestPrefetchImageSets(t *testing.T) { require := require.New(t) - // populate authfile - authFilePath := filepath.Join(t.TempDir(), "auth.json") + // populate authfile and registry config + tmpDir := t.TempDir() + authFilePath := filepath.Join(tmpDir, "auth.json") err := os.WriteFile(authFilePath, []byte(authFileContents), 0644) require.NoError(err) + registryConfigPath := filepath.Join(tmpDir, "registries.conf") + err = os.WriteFile(registryConfigPath, []byte(registryConfig), 0644) + require.NoError(err) tests := []struct { name string @@ -380,6 +318,7 @@ func TestPrefetchImageSets(t *testing.T) { nodeName: localNodeName, criClient: criClient, authFilePath: tt.authFilePath, + registryCfgPath: registryConfigPath, imageSetLister: imageSetInformer.Lister(), imageSetSynced: imageSetInformer.Informer().HasSynced, nodeLister: nodeInformer.Lister(), @@ -391,7 +330,7 @@ func TestPrefetchImageSets(t *testing.T) { Factor: retryFactor, Cap: 10 * time.Millisecond, }, - cache: lru.New(256), + cache: newImageCache(256), } imageSets := make([]*mcfgv1alpha1.PinnedImageSet, len(tt.imageSets)) @@ -417,53 +356,137 @@ func TestPrefetchImageSets(t *testing.T) { } } -var ( - fakeStableStorageWorkerNode = fakeNode( - "worker", - "100Gi", - ).DeepCopy() - - fakeWorkerPoolPinnedImageSets = fakeMachineConfigPool( - "worker", - map[string]string{ - "pools.operator.machineconfiguration.openshift.io/worker": "", +func TestPinnedImageSetAddEventHandlers(t *testing.T) { + require := require.New(t) + tests := []struct { + name string + nodes []runtime.Object + machineConfigPools []runtime.Object + currentImageSet *mcfgv1alpha1.PinnedImageSet + wantCacheElements int + }{ + { + name: "happy path", + machineConfigPools: []runtime.Object{fakeWorkerPoolPinnedImageSets}, + nodes: []runtime.Object{fakeStableStorageWorkerNode}, + wantCacheElements: 1, + currentImageSet: workerPis, }, - []mcfgv1.PinnedImageSetRef{ - { - Name: "fake-pinned-images", - }, + { + name: "worker pool with no pinned image sets", + machineConfigPools: []runtime.Object{fakeWorkerPoolNoPinnedImageSets}, + nodes: []runtime.Object{fakeStableStorageWorkerNode}, + wantCacheElements: 0, + currentImageSet: workerPis, }, - ) - - fakeWorkerPoolNoPinnedImageSets = fakeMachineConfigPool( - "worker", - 
map[string]string{ - "pools.operator.machineconfiguration.openshift.io/worker": "", + { + name: "worker pool master pinned image sets", + machineConfigPools: []runtime.Object{fakeWorkerPoolNoPinnedImageSets}, + nodes: []runtime.Object{fakeStableStorageWorkerNode}, + wantCacheElements: 0, + currentImageSet: masterPis, }, - []mcfgv1.PinnedImageSetRef{}, - ) - - fakeMasterPoolPinnedImageSets = fakeMachineConfigPool( - "master", - map[string]string{ - "pools.operator.machineconfiguration.openshift.io/master": "", - "pinnedimageset-opt-in": "true", + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + fakeMCOClient := fakemco.NewSimpleClientset(tt.machineConfigPools...) + fakeClient := fake.NewSimpleClientset(tt.nodes...) + sharedInformers := mcfginformers.NewSharedInformerFactory(fakeMCOClient, noResyncPeriodFunc()) + informerFactory := informers.NewSharedInformerFactory(fakeClient, noResyncPeriodFunc()) + mcpInformer := sharedInformers.Machineconfiguration().V1().MachineConfigPools() + nodeInformer := informerFactory.Core().V1().Nodes() + + informerFactory.Start(ctx.Done()) + informerFactory.WaitForCacheSync(ctx.Done()) + sharedInformers.Start(ctx.Done()) + sharedInformers.WaitForCacheSync(ctx.Done()) + + for _, mcp := range tt.machineConfigPools { + err := mcpInformer.Informer().GetIndexer().Add(mcp) + require.NoError(err) + } + for _, node := range tt.nodes { + err := nodeInformer.Informer().GetIndexer().Add(node) + require.NoError(err) + } + + nodeName := tt.nodes[0].(*corev1.Node).Name + p := &PinnedImageSetManager{ + nodeName: nodeName, + nodeLister: nodeInformer.Lister(), + mcpLister: mcpInformer.Lister(), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pinned-image-set-manager"), + } + p.enqueueMachineConfigPool = p.enqueue + p.addPinnedImageSet(tt.currentImageSet) + require.Equal(tt.wantCacheElements, p.queue.Len()) + }) + } +} + +func TestEnsureCrioPinnedImagesConfigFile(t *testing.T) { + require := require.New(t) + tmpDir := t.TempDir() + imageNames := []string{ + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc", + } + + newCfgBytes, err := createCrioConfigFileBytes(imageNames) + require.NoError(err) + testCfgPath := filepath.Join(tmpDir, "50-pinned-images") + err = os.WriteFile(testCfgPath, newCfgBytes, 0644) + require.NoError(err) + err = ensureCrioPinnedImagesConfigFile(testCfgPath, imageNames) + require.NoError(err) + + imageNames = append(imageNames, "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd") + err = ensureCrioPinnedImagesConfigFile(testCfgPath, imageNames) + require.ErrorIs(err, os.ErrPermission) // this error is from atomic writer attempting to write. 
+}
+
+func fakePinnedImageSet(name, image string, labels map[string]string) *mcfgv1alpha1.PinnedImageSet {
+	return &mcfgv1alpha1.PinnedImageSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   name,
+			Labels: labels,
 		},
-		[]mcfgv1.PinnedImageSetRef{
-			{
-				Name: "fake-master-pinned-images",
+		Spec: mcfgv1alpha1.PinnedImageSetSpec{
+			PinnedImages: []mcfgv1alpha1.PinnedImageRef{
+				{
+					Name: image,
+				},
 			},
 		},
-	)
+	}
+}
+
+var (
+	masterPis                       = fakePinnedImageSet("master-set", "image1", map[string]string{"machineconfiguration.openshift.io/role": "master"})
+	workerPis                       = fakePinnedImageSet("worker-set", "image1", map[string]string{"machineconfiguration.openshift.io/role": "worker"})
+	fakeStableStorageWorkerNode     = fakeNode([]string{"worker"}, "100Gi")
+	fakeWorkerPoolPinnedImageSets   = fakeMachineConfigPool("worker", []mcfgv1.PinnedImageSetRef{{Name: "worker-set"}})
+	fakeWorkerPoolNoPinnedImageSets = fakeMachineConfigPool("worker", nil)
+	availableImage                  = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0000000000000000000000000000000000000000000000000000000000000000"
+	// slowImage is a fake image that is used to block the image puller. This simulates a slow image pull of 1 second.
+	slowImage = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
 )

-func fakeNode(role, allocatableStorage string) *corev1.Node {
+func fakeNode(roles []string, allocatableStorage string) *corev1.Node {
+	labels := map[string]string{}
+	for _, role := range roles {
+		labels[fmt.Sprintf("node-role.kubernetes.io/%s", role)] = ""
+	}
+
+	name := strings.Join(roles, "-")
 	return &corev1.Node{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: "test-" + role,
-			Labels: map[string]string{
-				fmt.Sprintf("node-role.kubernetes.io/%s", role): "",
-			},
+			Name:   "test-" + name,
+			Labels: labels,
 		},
 		Status: corev1.NodeStatus{
 			Capacity: corev1.ResourceList{
@@ -474,35 +497,30 @@
 	}
 }

-func fakeMachineConfigPool(name string, labels map[string]string, pinnedImageSets []mcfgv1.PinnedImageSetRef) *mcfgv1.MachineConfigPool {
+func fakeMachineConfigPool(role string, pinnedImageSets []mcfgv1.PinnedImageSetRef) *mcfgv1.MachineConfigPool {
 	return &mcfgv1.MachineConfigPool{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:   name,
-			Labels: labels,
+			Name: role,
+			Labels: map[string]string{
+				fmt.Sprintf("pools.operator.machineconfiguration.openshift.io/%s", role): "",
+			},
 		},
 		Spec: mcfgv1.MachineConfigPoolSpec{
 			PinnedImageSets: pinnedImageSets,
 			MachineConfigSelector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{
-					"machineconfiguration.openshift.io/role": name,
+					"machineconfiguration.openshift.io/role": role,
 				},
 			},
 			NodeSelector: &metav1.LabelSelector{
 				MatchLabels: map[string]string{
-					fmt.Sprintf("node-role.kubernetes.io/%s", name): "",
+					fmt.Sprintf("node-role.kubernetes.io/%s", role): "",
 				},
 			},
 		},
 	}
 }

-const (
-	availableImage   = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0000000000000000000000000000000000000000000000000000000000000000"
-	unavailableImage = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
-	// slowImage is a fake image that is used to block the image puller. This simulates a slow image pull of 1 seconds.
- slowImage = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" -) - var _ runtimeapi.ImageServiceServer = (*FakeRuntime)(nil) // FakeRuntime represents a fake remote container runtime. diff --git a/pkg/operator/operator_test.go b/pkg/operator/operator_test.go index 6558e887d4..3f258fc3c0 100644 --- a/pkg/operator/operator_test.go +++ b/pkg/operator/operator_test.go @@ -7,6 +7,7 @@ import ( configv1 "github.com/openshift/api/config/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" fakeconfigclientset "github.com/openshift/client-go/config/clientset/versioned/fake" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" "github.com/openshift/machine-config-operator/test/helpers" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/stretchr/testify/assert" @@ -22,6 +23,9 @@ import ( func TestMetrics(t *testing.T) { optr := &Operator{ eventRecorder: &record.FakeRecorder{}, + fgAccessor: featuregates.NewHardcodedFeatureGateAccess( + []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{}, + ), } optr.vStore = newVersionStore() diff --git a/pkg/operator/status.go b/pkg/operator/status.go index 347939aea3..2cd0fa4322 100644 --- a/pkg/operator/status.go +++ b/pkg/operator/status.go @@ -13,6 +13,7 @@ import ( configv1 "github.com/openshift/api/config/v1" cov1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -725,27 +726,36 @@ func (optr *Operator) allMachineConfigPoolStatus() (map[string]string, error) { if err != nil { return nil, err } + + fg, err := optr.fgAccessor.CurrentFeatureGates() + if err != nil { + return nil, err + } + if fg == nil { + return nil, fmt.Errorf("feature gates are nil") + } + ret := map[string]string{} for _, pool := range pools { - ret[pool.GetName()] = machineConfigPoolStatus(pool) + ret[pool.GetName()] = machineConfigPoolStatus(fg, pool) } return ret, nil } // isMachineConfigPoolConfigurationValid returns nil, or error when the configuration of a `pool` is created by the controller at version `version`, // when the osImageURL does not match what's in the configmap or when the rendered-config-xxx does not match the OCP release version. -func isMachineConfigPoolConfigurationValid(pool *mcfgv1.MachineConfigPool, version, releaseVersion, osURL string, machineConfigGetter func(string) (*mcfgv1.MachineConfig, error)) error { +func isMachineConfigPoolConfigurationValid(fg featuregates.FeatureGate, pool *mcfgv1.MachineConfigPool, version, releaseVersion, osURL string, machineConfigGetter func(string) (*mcfgv1.MachineConfig, error)) error { // both .status.configuration.name and .status.configuration.source must be set. 
if pool.Spec.Configuration.Name == "" { - return fmt.Errorf("configuration spec for pool %s is empty: %v", pool.GetName(), machineConfigPoolStatus(pool)) + return fmt.Errorf("configuration spec for pool %s is empty: %v", pool.GetName(), machineConfigPoolStatus(fg, pool)) } if pool.Status.Configuration.Name == "" { // if status is empty, it means the node controller hasn't seen any node at the target configuration // we bubble up any error from the pool to make the info more visible - return fmt.Errorf("configuration status for pool %s is empty: %s", pool.GetName(), machineConfigPoolStatus(pool)) + return fmt.Errorf("configuration status for pool %s is empty: %s", pool.GetName(), machineConfigPoolStatus(fg, pool)) } if len(pool.Status.Configuration.Source) == 0 { - return fmt.Errorf("list of MachineConfigs that were used to generate configuration for pool %s is empty: %v", pool.GetName(), machineConfigPoolStatus(pool)) + return fmt.Errorf("list of MachineConfigs that were used to generate configuration for pool %s is empty: %v", pool.GetName(), machineConfigPoolStatus(fg, pool)) } mcs := []string{pool.Status.Configuration.Name} for _, fragment := range pool.Status.Configuration.Source { @@ -764,13 +774,13 @@ func isMachineConfigPoolConfigurationValid(pool *mcfgv1.MachineConfigPool, versi // The bootstrapped MCs fragments have this annotation, however, we don't fail (???) if they don't have // the annotation for some reason. if !ok && pool.Status.Configuration.Name == mcName { - return fmt.Errorf("%s must be created by controller version %s: %v", mcName, version, machineConfigPoolStatus(pool)) + return fmt.Errorf("%s must be created by controller version %s: %v", mcName, version, machineConfigPoolStatus(fg, pool)) } // user provided MC fragments do not have the annotation, so we just skip the version check there. // The check below is for: 1) the generated MC for the pool, and 2) the bootstrapped fragments // that do have this annotation set with a version. 
if ok && v != version { - return fmt.Errorf("controller version mismatch for %s expected %s has %s: %v", mcName, version, v, machineConfigPoolStatus(pool)) + return fmt.Errorf("controller version mismatch for %s expected %s has %s: %v", mcName, version, v, machineConfigPoolStatus(fg, pool)) } } // all MCs were generated by correct controller, but osImageURL is not a source, so let's double check here @@ -805,7 +815,7 @@ func isMachineConfigPoolConfigurationValid(pool *mcfgv1.MachineConfigPool, versi return nil } -func machineConfigPoolStatus(pool *mcfgv1.MachineConfigPool) string { +func machineConfigPoolStatus(fg featuregates.FeatureGate, pool *mcfgv1.MachineConfigPool) string { switch { case apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolRenderDegraded): cond := apihelpers.GetMachineConfigPoolCondition(pool.Status, mcfgv1.MachineConfigPoolRenderDegraded) @@ -817,10 +827,13 @@ func machineConfigPoolStatus(pool *mcfgv1.MachineConfigPool) string { return fmt.Sprintf("all %d nodes are at latest configuration %s", pool.Status.MachineCount, pool.Status.Configuration.Name) case apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolUpdating): return fmt.Sprintf("%d (ready %d) out of %d nodes are updating to latest configuration %s", pool.Status.UpdatedMachineCount, pool.Status.ReadyMachineCount, pool.Status.MachineCount, pool.Spec.Configuration.Name) - case apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded): - cond := apihelpers.GetMachineConfigPoolCondition(pool.Status, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded) - return fmt.Sprintf("pool is degraded because pinned image sets failed with %q: %q", cond.Reason, cond.Message) default: + if fg.Enabled(configv1.FeatureGatePinnedImages) { + if apihelpers.IsMachineConfigPoolConditionTrue(pool.Status.Conditions, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded) { + cond := apihelpers.GetMachineConfigPoolCondition(pool.Status, mcfgv1.MachineConfigPoolPinnedImageSetsDegraded) + return fmt.Sprintf("pool is degraded because pinned image sets failed with %q: %q", cond.Reason, cond.Message) + } + } return "" } } diff --git a/pkg/operator/status_test.go b/pkg/operator/status_test.go index fe93b1bffd..45f2235cf3 100644 --- a/pkg/operator/status_test.go +++ b/pkg/operator/status_test.go @@ -19,10 +19,12 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/util/uuid" + apicfgv1 "github.com/openshift/api/config/v1" configv1 "github.com/openshift/api/config/v1" mcfgv1 "github.com/openshift/api/machineconfiguration/v1" fakeconfigclientset "github.com/openshift/client-go/config/clientset/versioned/fake" cov1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" ctrlcommon "github.com/openshift/machine-config-operator/pkg/controller/common" "github.com/openshift/machine-config-operator/test/helpers" ) @@ -189,7 +191,19 @@ func TestIsMachineConfigPoolConfigurationValid(t *testing.T) { source = append(source, corev1.ObjectReference{Name: s}) } - err := isMachineConfigPoolConfigurationValid(&mcfgv1.MachineConfigPool{ + fgAccess := featuregates.NewHardcodedFeatureGateAccess( + []apicfgv1.FeatureGateName{ + apicfgv1.FeatureGateMachineConfigNodes, + apicfgv1.FeatureGatePinnedImages, + }, + []apicfgv1.FeatureGateName{}, + ) + fg, err := fgAccess.CurrentFeatureGates() + if err != nil { + t.Fatal(err) + } 
+ + err = isMachineConfigPoolConfigurationValid(fg, &mcfgv1.MachineConfigPool{ ObjectMeta: metav1.ObjectMeta{ Name: "dummy-pool", }, @@ -626,6 +640,9 @@ func TestOperatorSyncStatus(t *testing.T) { } { optr := &Operator{ eventRecorder: &record.FakeRecorder{}, + fgAccessor: featuregates.NewHardcodedFeatureGateAccess( + []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{}, + ), } optr.vStore = newVersionStore() optr.mcpLister = &mockMCPLister{ @@ -697,6 +714,9 @@ func TestOperatorSyncStatus(t *testing.T) { func TestInClusterBringUpStayOnErr(t *testing.T) { optr := &Operator{ eventRecorder: &record.FakeRecorder{}, + fgAccessor: featuregates.NewHardcodedFeatureGateAccess( + []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{}, + ), } optr.vStore = newVersionStore() optr.vStore.Set("operator", "test-version") @@ -759,6 +779,9 @@ func TestKubeletSkewUnSupported(t *testing.T) { } optr := &Operator{ eventRecorder: &record.FakeRecorder{}, + fgAccessor: featuregates.NewHardcodedFeatureGateAccess( + []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{}, + ), } optr.vStore = newVersionStore() optr.vStore.Set("operator", "test-version") @@ -846,6 +869,9 @@ func TestCustomPoolKubeletSkewUnSupported(t *testing.T) { } optr := &Operator{ eventRecorder: &record.FakeRecorder{}, + fgAccessor: featuregates.NewHardcodedFeatureGateAccess( + []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{}, + ), } optr.vStore = newVersionStore() optr.vStore.Set("operator", "test-version") @@ -933,6 +959,9 @@ func TestKubeletSkewSupported(t *testing.T) { } optr := &Operator{ eventRecorder: &record.FakeRecorder{}, + fgAccessor: featuregates.NewHardcodedFeatureGateAccess( + []configv1.FeatureGateName{configv1.FeatureGatePinnedImages}, []configv1.FeatureGateName{}, + ), } optr.vStore = newVersionStore() optr.vStore.Set("operator", "test-version") diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go index 91529f04ee..d4e220d20d 100644 --- a/pkg/operator/sync.go +++ b/pkg/operator/sync.go @@ -1420,6 +1420,15 @@ func (optr *Operator) syncMachineConfigServer(config *renderConfig) error { func (optr *Operator) syncRequiredMachineConfigPools(config *renderConfig) error { var lastErr error + fg, err := optr.fgAccessor.CurrentFeatureGates() + if err != nil { + return fmt.Errorf("could not get feature gates: %w", err) + } + + if fg == nil { + return fmt.Errorf("received nil feature gates") + } + ctx := context.TODO() // Calculate total timeout for "required"(aka master) nodes in the pool. 
@@ -1499,7 +1508,7 @@ func (optr *Operator) syncRequiredMachineConfigPools(config *renderConfig) error klog.Errorf("Failed to stamp bootimages configmap: %v", err) } - if err := isMachineConfigPoolConfigurationValid(pool, version.Hash, releaseVersion, opURL, optr.mcLister.Get); err != nil { + if err := isMachineConfigPoolConfigurationValid(fg, pool, version.Hash, releaseVersion, opURL, optr.mcLister.Get); err != nil { lastErr = fmt.Errorf("MachineConfigPool %s has not progressed to latest configuration: %w, retrying", pool.Name, err) syncerr := optr.syncUpgradeableStatus() if syncerr != nil { diff --git a/vendor/github.com/openshift/api/config/v1/feature_gates.go b/vendor/github.com/openshift/api/config/v1/feature_gates.go index 2786c2b30d..737c4e3221 100644 --- a/vendor/github.com/openshift/api/config/v1/feature_gates.go +++ b/vendor/github.com/openshift/api/config/v1/feature_gates.go @@ -175,126 +175,126 @@ var ( reportProblemsToJiraComponent("apiserver-auth"). contactPerson("stlaz"). productScope(kubernetes). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateServiceAccountTokenNodeBinding = newFeatureGate("ServiceAccountTokenNodeBinding"). reportProblemsToJiraComponent("apiserver-auth"). contactPerson("stlaz"). productScope(kubernetes). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateServiceAccountTokenPodNodeInfo = newFeatureGate("ServiceAccountTokenPodNodeInfo"). reportProblemsToJiraComponent("apiserver-auth"). contactPerson("stlaz"). productScope(kubernetes). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateValidatingAdmissionPolicy = newFeatureGate("ValidatingAdmissionPolicy"). reportProblemsToJiraComponent("kube-apiserver"). contactPerson("benluddy"). productScope(kubernetes). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateGatewayAPI = newFeatureGate("GatewayAPI"). reportProblemsToJiraComponent("Routing"). contactPerson("miciah"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateOpenShiftPodSecurityAdmission = newFeatureGate("OpenShiftPodSecurityAdmission"). reportProblemsToJiraComponent("auth"). contactPerson("stlaz"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateExternalCloudProvider = newFeatureGate("ExternalCloudProvider"). reportProblemsToJiraComponent("cloud-provider"). contactPerson("jspeed"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateExternalCloudProviderAzure = newFeatureGate("ExternalCloudProviderAzure"). reportProblemsToJiraComponent("cloud-provider"). contactPerson("jspeed"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateExternalCloudProviderGCP = newFeatureGate("ExternalCloudProviderGCP"). reportProblemsToJiraComponent("cloud-provider"). contactPerson("jspeed"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). 
mustRegister() FeatureGateExternalCloudProviderExternal = newFeatureGate("ExternalCloudProviderExternal"). reportProblemsToJiraComponent("cloud-provider"). contactPerson("elmiko"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateCSIDriverSharedResource = newFeatureGate("CSIDriverSharedResource"). reportProblemsToJiraComponent("builds"). contactPerson("adkaplan"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateBuildCSIVolumes = newFeatureGate("BuildCSIVolumes"). reportProblemsToJiraComponent("builds"). contactPerson("adkaplan"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateNodeSwap = newFeatureGate("NodeSwap"). reportProblemsToJiraComponent("node"). contactPerson("ehashman"). productScope(kubernetes). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateMachineAPIProviderOpenStack = newFeatureGate("MachineAPIProviderOpenStack"). reportProblemsToJiraComponent("openstack"). contactPerson("egarcia"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateInsightsConfigAPI = newFeatureGate("InsightsConfigAPI"). reportProblemsToJiraComponent("insights"). contactPerson("tremes"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateDynamicResourceAllocation = newFeatureGate("DynamicResourceAllocation"). reportProblemsToJiraComponent("scheduling"). contactPerson("jchaloup"). productScope(kubernetes). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateAzureWorkloadIdentity = newFeatureGate("AzureWorkloadIdentity"). reportProblemsToJiraComponent("cloud-credential-operator"). contactPerson("abutcher"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateMaxUnavailableStatefulSet = newFeatureGate("MaxUnavailableStatefulSet"). reportProblemsToJiraComponent("apps"). contactPerson("atiratree"). productScope(kubernetes). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateEventedPLEG = newFeatureGate("EventedPLEG"). @@ -307,84 +307,84 @@ var ( reportProblemsToJiraComponent("Routing"). contactPerson("miciah"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateSigstoreImageVerification = newFeatureGate("SigstoreImageVerification"). reportProblemsToJiraComponent("node"). contactPerson("sgrunert"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateGCPLabelsTags = newFeatureGate("GCPLabelsTags"). reportProblemsToJiraComponent("Installer"). contactPerson("bhb"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateAlibabaPlatform = newFeatureGate("AlibabaPlatform"). reportProblemsToJiraComponent("cloud-provider"). contactPerson("jspeed"). 
productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateCloudDualStackNodeIPs = newFeatureGate("CloudDualStackNodeIPs"). reportProblemsToJiraComponent("machine-config-operator/platform-baremetal"). contactPerson("mkowalsk"). productScope(kubernetes). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateVSphereStaticIPs = newFeatureGate("VSphereStaticIPs"). reportProblemsToJiraComponent("splat"). contactPerson("rvanderp3"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateRouteExternalCertificate = newFeatureGate("RouteExternalCertificate"). reportProblemsToJiraComponent("router"). contactPerson("thejasn"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateAdminNetworkPolicy = newFeatureGate("AdminNetworkPolicy"). reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("tssurya"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateNetworkLiveMigration = newFeatureGate("NetworkLiveMigration"). reportProblemsToJiraComponent("Networking/ovn-kubernetes"). contactPerson("pliu"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateNetworkDiagnosticsConfig = newFeatureGate("NetworkDiagnosticsConfig"). reportProblemsToJiraComponent("Networking/cluster-network-operator"). contactPerson("kyrtapz"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateHardwareSpeed = newFeatureGate("HardwareSpeed"). reportProblemsToJiraComponent("etcd"). contactPerson("hasbro17"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateAutomatedEtcdBackup = newFeatureGate("AutomatedEtcdBackup"). reportProblemsToJiraComponent("etcd"). contactPerson("hasbro17"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateMachineAPIOperatorDisableMachineHealthCheckController = newFeatureGate("MachineAPIOperatorDisableMachineHealthCheckController"). @@ -397,21 +397,21 @@ var ( reportProblemsToJiraComponent("dns"). contactPerson("miciah"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateVSphereControlPlaneMachineset = newFeatureGate("VSphereControlPlaneMachineSet"). reportProblemsToJiraComponent("splat"). contactPerson("rvanderp3"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateMachineConfigNodes = newFeatureGate("MachineConfigNodes"). reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("cdoern"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateClusterAPIInstall = newFeatureGate("ClusterAPIInstall"). 
@@ -424,98 +424,98 @@ var ( reportProblemsToJiraComponent("Monitoring"). contactPerson("slashpai"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateInstallAlternateInfrastructureAWS = newFeatureGate("InstallAlternateInfrastructureAWS"). reportProblemsToJiraComponent("Installer"). contactPerson("padillon"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateGCPClusterHostedDNS = newFeatureGate("GCPClusterHostedDNS"). reportProblemsToJiraComponent("Installer"). contactPerson("barbacbd"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateMixedCPUsAllocation = newFeatureGate("MixedCPUsAllocation"). reportProblemsToJiraComponent("NodeTuningOperator"). contactPerson("titzhak"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateManagedBootImages = newFeatureGate("ManagedBootImages"). reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("djoshy"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateDisableKubeletCloudCredentialProviders = newFeatureGate("DisableKubeletCloudCredentialProviders"). reportProblemsToJiraComponent("cloud-provider"). contactPerson("jspeed"). productScope(kubernetes). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateOnClusterBuild = newFeatureGate("OnClusterBuild"). reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("dkhater"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateSignatureStores = newFeatureGate("SignatureStores"). reportProblemsToJiraComponent("Cluster Version Operator"). contactPerson("lmohanty"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateKMSv1 = newFeatureGate("KMSv1"). reportProblemsToJiraComponent("kube-apiserver"). contactPerson("dgrisonnet"). productScope(kubernetes). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGatePinnedImages = newFeatureGate("PinnedImages"). reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("jhernand"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateUpgradeStatus = newFeatureGate("UpgradeStatus"). reportProblemsToJiraComponent("Cluster Version Operator"). contactPerson("pmuller"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateTranslateStreamCloseWebsocketRequests = newFeatureGate("TranslateStreamCloseWebsocketRequests"). reportProblemsToJiraComponent("kube-apiserver"). contactPerson("akashem"). productScope(kubernetes). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateVolumeGroupSnapshot = newFeatureGate("VolumeGroupSnapshot"). reportProblemsToJiraComponent("Storage / Kubernetes External Components"). 
contactPerson("fbertina"). productScope(kubernetes). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateExternalOIDC = newFeatureGate("ExternalOIDC"). reportProblemsToJiraComponent("authentication"). contactPerson("stlaz"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). enableForClusterProfile(Hypershift, Default, TechPreviewNoUpgrade). mustRegister() @@ -523,76 +523,76 @@ var ( reportProblemsToJiraComponent("cluster-config"). contactPerson("deads"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGatePlatformOperators = newFeatureGate("PlatformOperators"). reportProblemsToJiraComponent("olm"). contactPerson("joe"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateNewOLM = newFeatureGate("NewOLM"). reportProblemsToJiraComponent("olm"). contactPerson("joe"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateExternalRouteCertificate = newFeatureGate("ExternalRouteCertificate"). reportProblemsToJiraComponent("network-edge"). contactPerson("miciah"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather"). reportProblemsToJiraComponent("insights"). contactPerson("tremes"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateBareMetalLoadBalancer = newFeatureGate("BareMetalLoadBalancer"). reportProblemsToJiraComponent("metal"). contactPerson("EmilienM"). productScope(ocpSpecific). - enableIn(Default, TechPreviewNoUpgrade). + enableIn(Default, DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateInsightsConfig = newFeatureGate("InsightsConfig"). reportProblemsToJiraComponent("insights"). contactPerson("tremes"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateImagePolicy = newFeatureGate("ImagePolicy"). reportProblemsToJiraComponent("node"). contactPerson("rphillips"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateNodeDisruptionPolicy = newFeatureGate("NodeDisruptionPolicy"). reportProblemsToJiraComponent("MachineConfigOperator"). contactPerson("jerzhang"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateMetricsCollectionProfiles = newFeatureGate("MetricsCollectionProfiles"). reportProblemsToJiraComponent("Monitoring"). contactPerson("rexagod"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). mustRegister() FeatureGateVSphereDriverConfiguration = newFeatureGate("VSphereDriverConfiguration"). reportProblemsToJiraComponent("Storage / Kubernetes External Components"). contactPerson("rbednar"). productScope(ocpSpecific). - enableIn(TechPreviewNoUpgrade). + enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade). 
mustRegister() ) diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 2efe16f4e6..ef2c0cc141 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -43,16 +43,17 @@ var ( // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES. TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade" + // DevPreviewNoUpgrade turns on dev preview features that are not part of the normal supported platform. Turning + // this feature set on CANNOT BE UNDONE and PREVENTS UPGRADES. + DevPreviewNoUpgrade FeatureSet = "DevPreviewNoUpgrade" + // CustomNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. // Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations // your cluster may fail in an unrecoverable way. CustomNoUpgrade FeatureSet = "CustomNoUpgrade" - // TopologyManager enables ToplogyManager support. Upgrades are enabled with this feature. - LatencySensitive FeatureSet = "LatencySensitive" - // AllFixedFeatureSets are the featuresets that have known featuregates. Custom doesn't for instance. LatencySensitive is dead - AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade} + AllFixedFeatureSets = []FeatureSet{Default, TechPreviewNoUpgrade, DevPreviewNoUpgrade} ) type FeatureGateSpec struct { @@ -67,6 +68,7 @@ type FeatureGateSelection struct { // +optional // +kubebuilder:validation:XValidation:rule="oldSelf == 'CustomNoUpgrade' ? self == 'CustomNoUpgrade' : true",message="CustomNoUpgrade may not be changed" // +kubebuilder:validation:XValidation:rule="oldSelf == 'TechPreviewNoUpgrade' ? self == 'TechPreviewNoUpgrade' : true",message="TechPreviewNoUpgrade may not be changed" + // +kubebuilder:validation:XValidation:rule="oldSelf == 'DevPreviewNoUpgrade' ? self == 'DevPreviewNoUpgrade' : true",message="DevPreviewNoUpgrade may not be changed" FeatureSet FeatureSet `json:"featureSet,omitempty"` // customNoUpgrade allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, and PREVENTS UPGRADES. 
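Besides retiring LatencySensitive, the types_feature.go hunk above adds the DevPreviewNoUpgrade constant and, via the kubebuilder XValidation markers here and in the CRD changes below, makes selecting any *NoUpgrade feature set irreversible. A rough Go rendering of that CEL ratchet, with an invented function name and only the FeatureSet values taken from the diff:

```go
// Sketch of the "may not be changed" ratchet that rules like
//   oldSelf == 'DevPreviewNoUpgrade' ? self == 'DevPreviewNoUpgrade' : true
// express in CEL. Not actual openshift/api code.
package features

import "fmt"

type FeatureSet string

const (
	Default              FeatureSet = ""
	TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade"
	DevPreviewNoUpgrade  FeatureSet = "DevPreviewNoUpgrade"
	CustomNoUpgrade      FeatureSet = "CustomNoUpgrade"
)

// validateFeatureSetTransition rejects any update that moves a cluster off a
// previously selected *NoUpgrade feature set: picking one is a one-way door.
func validateFeatureSetTransition(old, updated FeatureSet) error {
	for _, locked := range []FeatureSet{CustomNoUpgrade, TechPreviewNoUpgrade, DevPreviewNoUpgrade} {
		if old == locked && updated != locked {
			return fmt.Errorf("%s may not be changed", locked)
		}
	}
	return nil
}
```

Because the CEL form is evaluated by the API server at admission time, the restriction holds without any controller involvement.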
diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..f4be4a6f79 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_00_cluster-version-operator_01_clusterversions-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,781 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/495 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: clusterversions.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterVersion + listKind: ClusterVersionList + plural: clusterversions + singular: clusterversion + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.history[?(@.state=="Completed")].version + name: Version + type: string + - jsonPath: .status.conditions[?(@.type=="Available")].status + name: Available + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].status + name: Progressing + type: string + - jsonPath: .status.conditions[?(@.type=="Progressing")].lastTransitionTime + name: Since + type: date + - jsonPath: .status.conditions[?(@.type=="Progressing")].message + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: "ClusterVersion is the configuration for the ClusterVersionOperator. + This is where parameters related to automatic updates can be set. \n Compatibility + level 1: Stable within a major release for a minimum of 12 months or 3 minor + releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the desired state of the cluster version - the operator + will work to ensure that the desired version is applied to the cluster. + properties: + capabilities: + description: capabilities configures the installation of optional, + core cluster components. A null value here is identical to an empty + object; see the child properties for default semantics. + properties: + additionalEnabledCapabilities: + description: additionalEnabledCapabilities extends the set of + managed capabilities beyond the baseline defined in baselineCapabilitySet. The + default is an empty set. + items: + description: ClusterVersionCapability enumerates optional, core + cluster components. 
+ enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + - CloudCredential + - Ingress + - CloudControllerManager + type: string + type: array + x-kubernetes-list-type: atomic + baselineCapabilitySet: + description: baselineCapabilitySet selects an initial set of optional + capabilities to enable, which can be extended via additionalEnabledCapabilities. If + unset, the cluster will choose a default, and the default may + change over time. The current default is vCurrent. + enum: + - None + - v4.11 + - v4.12 + - v4.13 + - v4.14 + - v4.15 + - v4.16 + - vCurrent + type: string + type: object + channel: + description: channel is an identifier for explicitly requesting that + a non-default set of updates be applied to this cluster. The default + channel will contain stable updates that are appropriate for + production clusters. + type: string + clusterID: + description: clusterID uniquely identifies this cluster. This is expected + to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + in hexadecimal values). This is a required field. + type: string + desiredUpdate: + description: "desiredUpdate is an optional field that indicates the + desired value of the cluster version. Setting this value will trigger + an upgrade (if the current version does not match the desired version). + The set of recommended update values is listed as part of available + updates in status, and setting values outside that range may cause + the upgrade to fail. \n Some of the fields are inter-related with + restrictions and meanings described here. 1. image is specified, + version is specified, architecture is specified. API validation + error. 2. image is specified, version is specified, architecture + is not specified. You should not do this. version is silently ignored + and image is used. 3. image is specified, version is not specified, + architecture is specified. API validation error. 4. image is specified, + version is not specified, architecture is not specified. image is + used. 5. image is not specified, version is specified, architecture + is specified. version and desired architecture are used to select + an image. 6. image is not specified, version is specified, architecture + is not specified. version and current architecture are used to select + an image. 7. image is not specified, version is not specified, architecture + is specified. API validation error. 8. image is not specified, version + is not specified, architecture is not specified. API validation + error. \n If an upgrade fails the operator will halt and report + status about the failing component. Setting the desired update value + back to the previous version will cause a rollback to be attempted. + Not all rollbacks will succeed." + properties: + architecture: + description: architecture is an optional field that indicates + the desired value of the cluster architecture. In this context + cluster architecture means either a single architecture or a + multi architecture. architecture can only be set to Multi thereby + only allowing updates from single to multi architecture. If + architecture is set, image cannot be set and version must be + set. Valid values are 'Multi' and empty. + enum: + - Multi + - "" + type: string + force: + description: force allows an administrator to update to an image + that has failed verification or upgradeable checks.
This option + should only be used when the authenticity of the provided image + has been verified out of band because the provided image will + run with full administrative access to the cluster. Do not use + this flag with images that come from unknown or potentially + malicious sources. + type: boolean + image: + description: image is a container image location that contains + the update. image should be used when the desired version does + not exist in availableUpdates or history. When image is set, + version is ignored. When image is set, version should be empty. + When image is set, architecture cannot be specified. + type: string + version: + description: version is a semantic version identifying the update + version. version is ignored if image is specified and required + if architecture is specified. + type: string + type: object + x-kubernetes-validations: + - message: cannot set both Architecture and Image + rule: 'has(self.architecture) && has(self.image) ? (self.architecture + == '''' || self.image == '''') : true' + - message: Version must be set if Architecture is set + rule: 'has(self.architecture) && self.architecture != '''' ? self.version + != '''' : true' + overrides: + description: overrides is a list of overrides for components that are + managed by the cluster version operator. Marking a component unmanaged + will prevent the operator from creating or updating the object. + items: + description: ComponentOverride allows overriding cluster version + operator's behavior for a component. + properties: + group: + description: group identifies the API group that the kind is + in. + type: string + kind: + description: kind identifies which object to override. + type: string + name: + description: name is the component's name. + type: string + namespace: + description: namespace is the component's namespace. If the + resource is cluster scoped, the namespace should be empty. + type: string + unmanaged: + description: 'unmanaged controls if cluster version operator + should stop managing the resources in this cluster. Default: + false' + type: boolean + required: + - group + - kind + - name + - namespace + - unmanaged + type: object + type: array + x-kubernetes-list-map-keys: + - kind + - group + - namespace + - name + x-kubernetes-list-type: map + signatureStores: + description: "signatureStores contains the upstream URIs to verify + release signatures and an optional reference to a config map by name + containing the PEM-encoded CA bundle. \n By default, CVO will use + existing signature stores if this property is empty. The CVO will + check the release signatures in the local ConfigMaps first. It will + search for a valid signature in these stores in parallel only when + local ConfigMaps did not include a valid signature. Validation will + fail if none of the signature stores reply with a valid signature + before timeout. Setting signatureStores will replace the default + signature stores with custom signature stores. Default stores can + be used with custom signature stores by adding them manually. \n + A maximum of 32 signature stores may be configured." + items: + description: SignatureStore represents the URL of a custom Signature + Store + properties: + ca: + description: ca is an optional reference to a config map by + name containing the PEM-encoded CA bundle. It is used as a + trust anchor to validate the TLS certificate presented by + the remote server. The key "ca.crt" is used to locate the + data.
If specified and the config map or expected key is not + found, the signature store is not honored. If the specified + ca data is not valid, the signature store is not honored. + If empty, we fall back to the CA configured via Proxy, which + is appended to the default system roots. The namespace for + this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + required: + - name + type: object + url: + description: url contains the upstream custom signature store + URL. url should be a valid absolute http/https URI of an upstream + signature store as per rfc1738. This must be provided and + cannot be empty. + type: string + x-kubernetes-validations: + - message: url must be a valid absolute URL + rule: isURL(self) + required: + - url + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - url + x-kubernetes-list-type: map + upstream: + description: upstream may be used to specify the preferred update + server. By default it will use the appropriate update server for + the cluster and region. + type: string + required: + - clusterID + type: object + status: + description: status contains information about the available updates and + any in-progress updates. + properties: + availableUpdates: + description: availableUpdates contains updates recommended for this + cluster. Updates which appear in conditionalUpdates but not in availableUpdates + may expose this cluster to known issues. This list may be empty + if no updates are recommended, if the update service is unavailable, + or if an invalid channel has been specified. + items: + description: Release represents an OpenShift release image and associated + metadata. + properties: + channels: + description: channels is the set of Cincinnati channels to which + the release currently belongs. + items: + type: string + type: array + x-kubernetes-list-type: set + image: + description: image is a container image location that contains + the update. When this field is part of spec, image is optional + if version is specified and the availableUpdates field contains + a matching version. + type: string + url: + description: url contains information about this release. This + URL is set by the 'url' metadata property on a release or + the metadata returned by the update API and should be displayed + as a link in user interfaces. The URL field may not be set + for test or nightly releases. + type: string + version: + description: version is a semantic version identifying the update + version. When this field is part of spec, version is optional + if image is specified. + type: string + type: object + nullable: true + type: array + x-kubernetes-list-type: atomic + capabilities: + description: capabilities describes the state of optional, core cluster + components. + properties: + enabledCapabilities: + description: enabledCapabilities lists all the capabilities that + are currently managed. + items: + description: ClusterVersionCapability enumerates optional, core + cluster components. + enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + - CloudCredential + - Ingress + - CloudControllerManager + type: string + type: array + x-kubernetes-list-type: atomic + knownCapabilities: + description: knownCapabilities lists all the capabilities known + to the current cluster. 
+ items: + description: ClusterVersionCapability enumerates optional, core + cluster components. + enum: + - openshift-samples + - baremetal + - marketplace + - Console + - Insights + - Storage + - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig + - ImageRegistry + - OperatorLifecycleManager + - CloudCredential + - Ingress + - CloudControllerManager + type: string + type: array + x-kubernetes-list-type: atomic + type: object + conditionalUpdates: + description: conditionalUpdates contains the list of updates that + may be recommended for this cluster if it meets specific required + conditions. Consumers interested in the set of updates that are + actually recommended for this cluster should use availableUpdates. + This list may be empty if no updates are recommended, if the update + service is unavailable, or if an empty or invalid channel has been + specified. + items: + description: ConditionalUpdate represents an update which is recommended + to some clusters on the version the current cluster is reconciling, + but which may not be recommended for the current cluster. + properties: + conditions: + description: 'conditions represents the observations of the + conditional update''s current status. Known types are: * Recommended, + for whether the update is recommended for the current cluster.' + items: + description: "Condition contains details for one aspect of + the current state of this API Resource. --- This struct + is intended for direct use as an array at the field path + .status.conditions. For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. + // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should + be when the underlying condition changed. If that is + not known, then using the time when the API field changed + is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, + if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the + current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier + indicating the reason for the condition's last transition. + Producers of specific condition types may define expected + values and meanings for this field, and whether the + values are considered a guaranteed API. The value should + be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ --- Many .condition.type values are consistent across + resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability + to deconflict is important. The regex it matches is + (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + release: + description: release is the target of the update. + properties: + channels: + description: channels is the set of Cincinnati channels + to which the release currently belongs. + items: + type: string + type: array + x-kubernetes-list-type: set + image: + description: image is a container image location that contains + the update. When this field is part of spec, image is + optional if version is specified and the availableUpdates + field contains a matching version. + type: string + url: + description: url contains information about this release. + This URL is set by the 'url' metadata property on a release + or the metadata returned by the update API and should + be displayed as a link in user interfaces. The URL field + may not be set for test or nightly releases. + type: string + version: + description: version is a semantic version identifying the + update version. When this field is part of spec, version + is optional if image is specified. + type: string + type: object + risks: + description: risks represents the range of issues associated + with updating to the target release. The cluster-version operator + will evaluate all entries, and only recommend the update if + there is at least one entry and all entries recommend the + update. + items: + description: ConditionalUpdateRisk represents a reason and + cluster-state for not recommending a conditional update. + properties: + matchingRules: + description: matchingRules is a slice of conditions for + deciding which clusters match the risk and which do + not. The slice is ordered by decreasing precedence. + The cluster-version operator will walk the slice in + order, and stop after the first it can successfully + evaluate. If no condition can be successfully evaluated, + the update will not be recommended. + items: + description: ClusterCondition is a union of typed cluster + conditions. The 'type' property determines which + of the type-specific properties are relevant. When + evaluated on a cluster, the condition may match, not + match, or fail to evaluate. + properties: + promql: + description: promQL represents a cluster condition + based on PromQL. + properties: + promql: + description: PromQL is a PromQL query classifying + clusters. This query should return a + 1 in the match case and a 0 in the does-not-match + case. Queries which return no time series, + or which return values besides 0 or 1, are + evaluation failures. + type: string + required: + - promql + type: object + type: + description: type represents the cluster-condition + type. This defines the members and semantics of + any additional properties. + enum: + - Always + - PromQL + type: string + required: + - type + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + message: + description: message provides additional information about + the risk of updating, in the event that matchingRules + match the cluster state.
This is only to be consumed + by humans. It may contain Line Feed characters (U+000A), + which should be rendered as new lines. + minLength: 1 + type: string + name: + description: name is the CamelCase reason for not recommending + a conditional update, in the event that matchingRules + match the cluster state. + minLength: 1 + type: string + url: + description: url contains information about this risk. + format: uri + minLength: 1 + type: string + required: + - matchingRules + - message + - name + - url + type: object + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - release + - risks + type: object + type: array + x-kubernetes-list-type: atomic + conditions: + description: conditions provides information about the cluster version. + The condition "Available" is set to true if the desiredUpdate has + been reached. The condition "Progressing" is set to true if an update + is being applied. The condition "Degraded" is set to true if an + update is currently blocked by a temporary or permanent error. Conditions + are only valid for the current desiredUpdate when metadata.generation + is equal to status.generation. + items: + description: ClusterOperatorStatusCondition represents the state + of the operator's managed and monitored components. + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update + to the current status property. + format: date-time + type: string + message: + description: message provides additional information about the + current condition. This is only to be consumed by humans. It + may contain Line Feed characters (U+000A), which should be + rendered as new lines. + type: string + reason: + description: reason is the CamelCase reason for the condition's + current status. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the aspect reported by this condition. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + desired: + description: desired is the version that the cluster is reconciling + towards. If the cluster is not yet fully initialized desired will + be set with the information available, which may be an image or + a tag. + properties: + channels: + description: channels is the set of Cincinnati channels to which + the release currently belongs. + items: + type: string + type: array + x-kubernetes-list-type: set + image: + description: image is a container image location that contains + the update. When this field is part of spec, image is optional + if version is specified and the availableUpdates field contains + a matching version. + type: string + url: + description: url contains information about this release. This + URL is set by the 'url' metadata property on a release or the + metadata returned by the update API and should be displayed + as a link in user interfaces. The URL field may not be set for + test or nightly releases. + type: string + version: + description: version is a semantic version identifying the update + version. When this field is part of spec, version is optional + if image is specified. + type: string + type: object + history: + description: history contains a list of the most recent versions applied + to the cluster. 
This value may be empty during cluster startup, + and then will be updated when a new update is being applied. The + newest update is first in the list and it is ordered by recency. + Updates in the history have state Completed if the rollout completed + - if an update was failing or halfway applied the state will be + Partial. Only a limited amount of update history is preserved. + items: + description: UpdateHistory is a single attempted update to the cluster. + properties: + acceptedRisks: + description: acceptedRisks records risks which were accepted + to initiate the update. For example, it may mention an Upgradeable=False + or missing signature that was overridden via desiredUpdate.force, + or an update that was initiated despite not being in the availableUpdates + set of recommended update targets. + type: string + completionTime: + description: completionTime, if set, is when the update was + fully applied. The update that is currently being applied + will have a null completion time. Completion time will always + be set for entries that are not the current update (usually + to the started time of the next update). + format: date-time + nullable: true + type: string + image: + description: image is a container image location that contains + the update. This value is always populated. + type: string + startedTime: + description: startedTime is the time at which the update was + started. + format: date-time + type: string + state: + description: state reflects whether the update was fully applied. + The Partial state indicates the update is not fully applied, + while the Completed state indicates the update was successfully + rolled out at least once (all parts of the update successfully + applied). + type: string + verified: + description: verified indicates whether the provided update + was properly verified before it was installed. If this is + false the cluster may not be trusted. Verified does not cover + upgradeable checks that depend on the cluster state at the + time when the update target was accepted. + type: boolean + version: + description: version is a semantic version identifying the update + version. If the requested image does not define a version, + or if a failure occurs retrieving the image, this value may + be empty. + type: string + required: + - completionTime + - image + - startedTime + - state + - verified + type: object + type: array + x-kubernetes-list-type: atomic + observedGeneration: + description: observedGeneration reports which version of the spec + is being synced. If this value is not equal to metadata.generation, + then the desired and conditions fields may represent a previous + version. + format: int64 + type: integer + versionHash: + description: versionHash is a fingerprint of the content that the + cluster will be updated with. It is used by the operator to avoid + unnecessary work and is for internal use only. + type: string + required: + - availableUpdates + - desired + - observedGeneration + - versionHash + type: object + required: + - spec + type: object + x-kubernetes-validations: + - message: the `baremetal` capability requires the `MachineAPI` capability, + which is neither explicitly nor implicitly enabled in this cluster, please + enable the `MachineAPI` capability + rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) + && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''baremetal'' + in self.spec.capabilities.additionalEnabledCapabilities ?
''MachineAPI'' + in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) + && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) + && ''MachineAPI'' in self.status.capabilities.enabledCapabilities) : true' + - message: the `marketplace` capability requires the `OperatorLifecycleManager` + capability, which is neither explicitly nor implicitly enabled in this + cluster, please enable the `OperatorLifecycleManager` capability + rule: 'has(self.spec.capabilities) && has(self.spec.capabilities.additionalEnabledCapabilities) + && self.spec.capabilities.baselineCapabilitySet == ''None'' && ''marketplace'' + in self.spec.capabilities.additionalEnabledCapabilities ? ''OperatorLifecycleManager'' + in self.spec.capabilities.additionalEnabledCapabilities || (has(self.status) + && has(self.status.capabilities) && has(self.status.capabilities.enabledCapabilities) + && ''OperatorLifecycleManager'' in self.status.capabilities.enabledCapabilities) + : true' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..028ae8f3f5 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_authentications-SelfManagedHA-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,553 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: authentications.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Authentication + listKind: AuthenticationList + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Authentication specifies cluster-wide settings for authentication + (like OAuth and webhook token authenticators). The canonical name of an + instance is `cluster`. \n Compatibility level 1: Stable within a major release + for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + oauthMetadata: + description: 'oauthMetadata contains the discovery endpoint data for + OAuth 2.0 Authorization Server Metadata for an external OAuth server.
+ This discovery document can be viewed from its served location: + oc get --raw ''/.well-known/oauth-authorization-server'' For further + details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + If oauthMetadata.name is non-empty, this value has precedence over + any metadata reference stored in status. The key "oauthMetadata" + is used to locate the data. If specified and the config map or expected + key is not found, no metadata is served. If the specified metadata + is not valid, no metadata is served. The namespace for this config + map is openshift-config.' + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + required: + - name + type: object + oidcProviders: + description: "OIDCProviders are OIDC identity providers that can issue + tokens for this cluster. Can only be set if \"Type\" is set to \"OIDC\". + \n At most one provider can be configured." + items: + properties: + claimMappings: + description: ClaimMappings describes rules on how to transform + information from an ID token into a cluster identity + properties: + groups: + description: Groups is a name of the claim that should be + used to construct groups for the cluster identity. The + referenced claim must use an array of string values. + properties: + claim: + description: Claim is a JWT token claim to be used in + the mapping + type: string + prefix: + description: "Prefix is a string to prefix the value + from the token in the result of the claim mapping. + \n By default, no prefixing occurs. \n Example: if + `prefix` is set to \"myoidc:\" and the `claim` in + JWT contains an array of strings \"a\", \"b\" and + \ \"c\", the mapping will result in an array of strings + \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\"." + type: string + required: + - claim + type: object + username: + description: "Username is a name of the claim that should + be used to construct usernames for the cluster identity. + \n Default value: \"sub\"" + properties: + claim: + description: Claim is a JWT token claim to be used in + the mapping + type: string + prefix: + properties: + prefixString: + minLength: 1 + type: string + required: + - prefixString + type: object + prefixPolicy: + description: "PrefixPolicy specifies how a prefix should + apply. \n By default, claims other than `email` will + be prefixed with the issuer URL to prevent naming + clashes with other plugins. \n Set to \"NoPrefix\" + to disable prefixing. \n Example: (1) `prefix` is + set to \"myoidc:\" and `claim` is set to \"username\". + If the JWT claim `username` contains value `userA`, + the resulting mapped value will be \"myoidc:userA\". + (2) `prefix` is set to \"myoidc:\" and `claim` is + set to \"email\". If the JWT `email` claim contains + value \"userA@myoidc.tld\", the resulting mapped value + will be \"myoidc:userA@myoidc.tld\". (3) `prefix` + is unset, `issuerURL` is set to `https://myoidc.tld`, + the JWT claims include \"username\":\"userA\" and + \"email\":\"userA@myoidc.tld\", and `claim` is set + to: (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\" + (b) \"email\": the mapped value will be \"userA@myoidc.tld\"" + enum: + - "" + - NoPrefix + - Prefix + type: string + required: + - claim + type: object + x-kubernetes-validations: + - message: prefix must be set if prefixPolicy is 'Prefix', + but must remain unset otherwise + rule: 'has(self.prefixPolicy) && self.prefixPolicy == + ''Prefix'' ?
(has(self.prefix) && size(self.prefix.prefixString) + > 0) : !has(self.prefix)' + type: object + claimValidationRules: + description: ClaimValidationRules are rules that are applied + to validate token claims to authenticate users. + items: + properties: + requiredClaim: + description: RequiredClaim allows configuring a required + claim name and its expected value + properties: + claim: + description: Claim is a name of a required claim. + Only claims with string values are supported. + minLength: 1 + type: string + requiredValue: + description: RequiredValue is the required value for + the claim. + minLength: 1 + type: string + required: + - claim + - requiredValue + type: object + type: + default: RequiredClaim + description: Type sets the type of the validation rule + enum: + - RequiredClaim + type: string + type: object + type: array + x-kubernetes-list-type: atomic + issuer: + description: Issuer describes attributes of the OIDC token issuer + properties: + audiences: + description: Audiences is an array of audiences that the + token was issued for. Valid tokens must include at least + one of these values in their "aud" claim. Must be set + to exactly one value. + items: + minLength: 1 + type: string + maxItems: 10 + minItems: 1 + type: array + x-kubernetes-list-type: set + issuerCertificateAuthority: + description: CertificateAuthority is a reference to a config + map in the configuration namespace. The .data of the configMap + must contain the "ca-bundle.crt" key. If unset, system + trust is used instead. + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + required: + - name + type: object + issuerURL: + description: URL is the serving URL of the token issuer. + Must use the https:// scheme. + pattern: ^https:\/\/[^\s] + type: string + required: + - audiences + - issuerURL + type: object + name: + description: Name of the OIDC provider + minLength: 1 + type: string + oidcClients: + description: OIDCClients contains configuration for the platform's + clients that need to request tokens from the issuer + items: + properties: + clientID: + description: ClientID is the identifier of the OIDC client + from the OIDC provider + minLength: 1 + type: string + clientSecret: + description: ClientSecret refers to a secret in the `openshift-config` + namespace that contains the client secret in the `clientSecret` + key of the `.data` field + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + required: + - name + type: object + componentName: + description: ComponentName is the name of the component + that is supposed to consume this client configuration + maxLength: 256 + minLength: 1 + type: string + componentNamespace: + description: ComponentNamespace is the namespace of the + component that is supposed to consume this client configuration + maxLength: 63 + minLength: 1 + type: string + extraScopes: + description: ExtraScopes is an optional set of scopes + to request tokens with.
+ items: + type: string + type: array + x-kubernetes-list-type: set + required: + - clientID + - componentName + - componentNamespace + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - componentNamespace + - componentName + x-kubernetes-list-type: map + required: + - issuer + - name + type: object + maxItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + serviceAccountIssuer: + description: 'serviceAccountIssuer is the identifier of the bound + service account token issuer. The default is https://kubernetes.default.svc + WARNING: Updating this field will not result in immediate invalidation + of all bound tokens with the previous issuer value. Instead, the + tokens issued by the previous service account issuer will continue to + be trusted for a time period chosen by the platform (currently set + to 24h). This time period is subject to change over time. This allows + internal components to transition to use a new service account issuer + without service disruption.' + type: string + type: + description: type identifies the cluster managed, user facing authentication + mode in use. Specifically, it manages the component that responds + to login attempts. The default is IntegratedOAuth. + enum: + - "" + - None + - IntegratedOAuth + - OIDC + type: string + webhookTokenAuthenticator: + description: "webhookTokenAuthenticator configures a remote token + reviewer. These remote authentication webhooks can be used to verify + bearer tokens via the tokenreviews.authentication.k8s.io REST API. + This is required to honor bearer tokens that are provisioned by + an external authentication service. \n Can only be set if \"Type\" + is set to \"None\"." + properties: + kubeConfig: + description: "kubeConfig references a secret that contains kube + config file data which describes how to access the remote webhook + service. The namespace for the referenced secret is openshift-config. + \n For further details, see: \n https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + \n The key \"kubeConfig\" is used to locate the data. If the + secret or expected key is not found, the webhook is not honored. + If the specified kube config data is not valid, the webhook + is not honored." + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + required: + - kubeConfig + type: object + webhookTokenAuthenticators: + description: webhookTokenAuthenticators is DEPRECATED, setting it + has no effect. + items: + description: deprecatedWebhookTokenAuthenticator holds the necessary + configuration options for a remote token authenticator. It's the + same as WebhookTokenAuthenticator but it's missing the 'required' + validation on KubeConfig field. + properties: + kubeConfig: + description: 'kubeConfig contains kube config file data which + describes how to access the remote webhook service. For further + details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + The key "kubeConfig" is used to locate the data. If the secret + or expected key is not found, the webhook is not honored. + If the specified kube config data is not valid, the webhook + is not honored. The namespace for this secret is determined + by the point of use.'
+ properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + required: + - name + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + integratedOAuthMetadata: + description: 'integratedOAuthMetadata contains the discovery endpoint + data for OAuth 2.0 Authorization Server Metadata for the in-cluster + integrated OAuth server. This discovery document can be viewed from + its served location: oc get --raw ''/.well-known/oauth-authorization-server'' + For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + This contains the observed value based on cluster state. An explicitly + set value in spec.oauthMetadata has precedence over this field. + This field has no meaning if authentication spec.type is not set + to IntegratedOAuth. The key "oauthMetadata" is used to locate the + data. If the config map or expected key is not found, no metadata + is served. If the specified metadata is not valid, no metadata is + served. The namespace for this config map is openshift-config-managed.' + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + required: + - name + type: object + oidcClients: + description: OIDCClients is where participating operators place the + current OIDC client status for OIDC clients that can be customized + by the cluster-admin. + items: + properties: + componentName: + description: ComponentName is the name of the component that + will consume a client configuration. + maxLength: 256 + minLength: 1 + type: string + componentNamespace: + description: ComponentNamespace is the namespace of the component + that will consume a client configuration. + maxLength: 63 + minLength: 1 + type: string + conditions: + description: "Conditions are used to communicate the state of + the `oidcClients` entry. \n Supported conditions include Available, + Degraded and Progressing. \n If Available is true, the component + is successfully using the configured client. If Degraded is + true, that means something has gone wrong trying to handle + the client configuration. If Progressing is true, that means + the component is taking some action related to the `oidcClients` + entry." + items: + description: "Condition contains details for one aspect of + the current state of this API Resource. --- This struct + is intended for direct use as an array at the field path + .status.conditions. For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. + // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should + be when the underlying condition changed. If that is + not known, then using the time when the API field changed + is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, + if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the + current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier + indicating the reason for the condition's last transition. + Producers of specific condition types may define expected + values and meanings for this field, and whether the + values are considered a guaranteed API. The value should + be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across + resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability + to deconflict is important. The regex it matches is + (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + consumingUsers: + description: ConsumingUsers is a slice of ServiceAccounts that + need to have read permission on the `clientSecret` secret. + items: + description: ConsumingUser is an alias for string which we + add validation to. Currently only service accounts are supported. + maxLength: 512 + minLength: 1 + pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + maxItems: 5 + type: array + x-kubernetes-list-type: set + currentOIDCClients: + description: CurrentOIDCClients is a list of clients that the + component is currently using. + items: + properties: + clientID: + description: ClientID is the identifier of the OIDC client + from the OIDC provider + minLength: 1 + type: string + issuerURL: + description: URL is the serving URL of the token issuer. + Must use the https:// scheme. 
+ pattern: ^https:\/\/[^\s] + type: string + oidcProviderName: + description: OIDCName refers to the `name` of the provider + from `oidcProviders` + minLength: 1 + type: string + required: + - clientID + - issuerURL + - oidcProviderName + type: object + type: array + x-kubernetes-list-map-keys: + - issuerURL + - clientID + x-kubernetes-list-type: map + required: + - componentName + - componentNamespace + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - componentNamespace + - componentName + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + x-kubernetes-validations: + - message: all oidcClients in the oidcProviders must match their componentName + and componentNamespace to either a previously configured oidcClient or + they must exist in the status.oidcClients + rule: '!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) + || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace + == specC.componentNamespace && statusC.componentName == specC.componentName) + || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, + oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, + oldC.componentNamespace == specC.componentNamespace && oldC.componentName + == specC.componentName)))))' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_featuregates.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_featuregates.crd.yaml index b179eee0ab..c27cb73e2d 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_featuregates.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_featuregates.crd.yaml @@ -80,6 +80,9 @@ spec: - message: TechPreviewNoUpgrade may not be changed rule: 'oldSelf == ''TechPreviewNoUpgrade'' ? self == ''TechPreviewNoUpgrade'' : true' + - message: DevPreviewNoUpgrade may not be changed + rule: 'oldSelf == ''DevPreviewNoUpgrade'' ? self == ''DevPreviewNoUpgrade'' + : true' type: object status: description: status holds observed values from the cluster. 
They may not diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..5e12ca8e22 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_infrastructures-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,2149 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: infrastructures.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Infrastructure + listKind: InfrastructureList + plural: infrastructures + singular: infrastructure + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Infrastructure holds cluster-wide information about Infrastructure. + \ The canonical name is `cluster` \n Compatibility level 1: Stable within + a major release for a minimum of 12 months or 3 minor releases (whichever + is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + cloudConfig: + description: "cloudConfig is a reference to a ConfigMap containing + the cloud provider configuration file. This configuration file is + used to configure the Kubernetes cloud provider integration when + using the built-in cloud provider integration or the external cloud + controller manager. The namespace for this config map is openshift-config. + \n cloudConfig should only be consumed by the kube_cloud_config + controller. The controller is responsible for using the user configuration + in the spec for various platforms and combining that with the user + provided ConfigMap in this field to create a stitched kube cloud + config. The controller generates a ConfigMap `kube-cloud-config` + in `openshift-config-managed` namespace with the kube cloud config + stored in the `cloud.conf` key. All the clients are expected to use + the generated ConfigMap only." + properties: + key: + description: Key allows pointing to a specific key/value inside + of the configmap. This is useful for logical file references. + type: string + name: + type: string + type: object + platformSpec: + description: platformSpec holds desired information specific to the + underlying infrastructure provider.
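+          # Illustrative sketch only (hypothetical values, not part of the
+          # generated manifest): an Infrastructure object is normally written
+          # by the installer rather than by hand, but a dual-stack bare metal
+          # spec fragment that the platformSpec schema below accepts would
+          # look roughly like:
+          #
+          #   platformSpec:
+          #     type: BareMetal
+          #     baremetal:
+          #       apiServerInternalIPs:
+          #       - 192.0.2.10   # at most one IPv4 entry...
+          #       - fd00::10     # ...and at most one IPv6 entry, per the CEL rules
+          #       machineNetworks:
+          #       - 192.0.2.0/24
+          #       - fd00::/64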
+ properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba + Cloud infrastructure provider. + type: object + aws: + description: AWS contains settings specific to the Amazon Web + Services infrastructure provider. + properties: + serviceEndpoints: + description: serviceEndpoints list contains custom endpoints + which will override default service endpoint of AWS Services. + There must be only one ServiceEndpoint for a service. + items: + description: AWSServiceEndpoint store the configuration + of a custom url to override existing defaults of AWS Services. + properties: + name: + description: name is the name of the AWS service. The + list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html + This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme + https, that overrides the default generated endpoint + for a client. This must be provided and cannot be + empty. + pattern: ^https:// + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + azure: + description: Azure contains settings specific to the Azure infrastructure + provider. + type: object + baremetal: + description: BareMetal contains settings specific to the BareMetal + platform. + properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IP addresses, + one from IPv4 family and one from IPv6. In single stack + clusters a single IP address is expected. When omitted, + values from the status.apiServerInternalIPs will be used. + Once set, the list cannot be completely removed (but its + second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must contain at most one IPv4 + address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IP + addresses, one from IPv4 family and one from IPv6. In single + stack clusters a single IP address is expected. When omitted, + values from the status.ingressIPs will be used. Once set, + the list cannot be completely removed (but its second entry + can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). 
+ maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 address + and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' + machineNetworks: + description: machineNetworks are IP networks used to connect + all the OpenShift cluster nodes. Each network is provided + in the CIDR format and should be IPv4 or IPv6, for example + "10.0.0.0/8" or "fd00::/8". + items: + description: CIDR is an IP address range in CIDR notation + (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix + Metal infrastructure provider. + type: object + external: + description: ExternalPlatformType represents generic infrastructure + provider. Platform-specific components should be supplemented + separately. + properties: + platformName: + default: Unknown + description: PlatformName holds the arbitrary string representing + the infrastructure provider name, expected to be set at + the installation time. This field is solely for informational + and reporting purposes and is not expected to be used for + decision-making. + type: string + x-kubernetes-validations: + - message: platform name cannot be changed once set + rule: oldSelf == 'Unknown' || self == oldSelf + type: object + gcp: + description: GCP contains settings specific to the Google Cloud + Platform infrastructure provider. + type: object + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud + infrastructure provider. + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt + infrastructure provider. + type: object + nutanix: + description: Nutanix contains settings specific to the Nutanix + infrastructure provider. + properties: + failureDomains: + description: failureDomains configures failure domains information + for the Nutanix platform. When set, the failure domains + defined here may be used to spread Machines across prism + element clusters to improve fault tolerance of the cluster. + items: + description: NutanixFailureDomain configures failure domain + information for the Nutanix platform. + properties: + cluster: + description: cluster is to identify the cluster (the + Prism Element under management of the Prism Central), + in which the Machine's VM will be created. The cluster + identifier (uuid or name) can be obtained from the + Prism Central console or using the prism_central API. + properties: + name: + description: name is the resource name in the PC. + It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use + for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource in + the PC. 
It cannot be empty if the type is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type + is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) + : !has(self.uuid)' + - message: name configuration is required when type + is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) + : !has(self.name)' + name: + description: name defines the unique name of a failure + domain. Name is required and must be at most 64 characters + in length. It must consist of only lower case alphanumeric + characters and hyphens (-). It must start and end + with an alphanumeric character. This value is arbitrary + and is used to identify the failure domain within + the platform. + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + subnets: + description: subnets holds a list of identifiers (one + or more) of the cluster's network subnets for the + Machine's VM to connect to. The subnet identifiers + (uuid or name) can be obtained from the Prism Central + console or using the prism_central API. + items: + description: NutanixResourceIdentifier holds the identity + of a Nutanix PC resource (cluster, image, subnet, + etc.) + properties: + name: + description: name is the resource name in the + PC. It cannot be empty if the type is Name. + type: string + type: + description: type is the identifier type to use + for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource + in the PC. It cannot be empty if the type is + UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when type + is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' ? has(self.uuid) + : !has(self.uuid)' + - message: name configuration is required when type + is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' ? has(self.name) + : !has(self.name)' + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - cluster + - name + - subnets + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + prismCentral: + description: prismCentral holds the endpoint address and port + to access the Nutanix Prism Central. When a cluster-wide + proxy is installed, by default, this endpoint will be accessed + via the proxy. Should you wish for communication with this + endpoint not to be proxied, please add the endpoint to the + proxy spec.noProxy list. + properties: + address: + description: address is the endpoint address (DNS name + or IP address) of the Nutanix Prism Central or Element + (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to access the Nutanix + Prism Central or Element (cluster) + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + prismElements: + description: prismElements holds one or more endpoint address + and port data to access the Nutanix Prism Elements (clusters) + of the Nutanix Prism Central. Currently we only support + one Prism Element (cluster) for an OpenShift cluster, where + all the Nutanix resources (VMs, subnets, volumes, etc.) + used in the OpenShift cluster are located. In the future, + we may support Nutanix resources (VMs, etc.) 
spread over + multiple Prism Elements (clusters) of the Prism Central. + items: + description: NutanixPrismElementEndpoint holds the name + and endpoint data for a Prism Element (cluster) + properties: + endpoint: + description: endpoint holds the endpoint address and + port data of the Prism Element (cluster). When a cluster-wide + proxy is installed, by default, this endpoint will + be accessed via the proxy. Should you wish for communication + with this endpoint not to be proxied, please add the + endpoint to the proxy spec.noProxy list. + properties: + address: + description: address is the endpoint address (DNS + name or IP address) of the Nutanix Prism Central + or Element (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to access the + Nutanix Prism Central or Element (cluster) + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + name: + description: name is the name of the Prism Element (cluster). + This value will correspond with the cluster field + configured on other resources (eg Machines, PVCs, + etc). + maxLength: 256 + type: string + required: + - endpoint + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - prismCentral + - prismElements + type: object + openstack: + description: OpenStack contains settings specific to the OpenStack + infrastructure provider. + properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IP addresses, + one from IPv4 family and one from IPv6. In single stack + clusters a single IP address is expected. When omitted, + values from the status.apiServerInternalIPs will be used. + Once set, the list cannot be completely removed (but its + second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must contain at most one IPv4 + address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IP + addresses, one from IPv4 family and one from IPv6. In single + stack clusters a single IP address is expected. When omitted, + values from the status.ingressIPs will be used. Once set, + the list cannot be completely removed (but its second entry + can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). 
+ maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 address + and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' + machineNetworks: + description: machineNetworks are IP networks used to connect + all the OpenShift cluster nodes. Each network is provided + in the CIDR format and should be IPv4 or IPv6, for example + "10.0.0.0/8" or "fd00::/8". + items: + description: CIDR is an IP address range in CIDR notation + (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure + provider. + type: object + powervs: + description: PowerVS contains settings specific to the IBM Power + Systems Virtual Servers infrastructure provider. + properties: + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints + which will override the default service endpoints of a Power + VS service. + items: + description: PowervsServiceEndpoint stores the configuration + of a custom url to override existing defaults of PowerVS + Services. + properties: + name: + description: name is the name of the Power VS service. + Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api + ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller + Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme + https, that overrides the default generated endpoint + for a client. This must be provided and cannot be + empty. + format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: type is the underlying infrastructure provider for + the cluster. This value controls whether infrastructure automation + such as service load balancers, dynamic volume provisioning, + machine creation and deletion, and other integrations are enabled. + If None, no infrastructure automation is enabled. Allowed values + are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", + "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", + "Nutanix" and "None". Individual components may not support + all platforms, and must handle unrecognized platforms as None + if they do not support that platform. + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + vsphere: + description: VSphere contains settings specific to the VSphere + infrastructure provider. 
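+            # Illustrative sketch only (hypothetical inventory paths, not part
+            # of the generated manifest): a failure domain entry satisfying the
+            # topology constraints below could look like:
+            #
+            #   vsphere:
+            #     failureDomains:
+            #     - name: us-east-1a
+            #       region: us-east
+            #       zone: us-east-1a
+            #       server: vcenter.example.com
+            #       topology:
+            #         datacenter: dc1
+            #         computeCluster: /dc1/host/cluster1
+            #         datastore: /dc1/datastore/datastore1
+            #         networks:
+            #         - /dc1/network/port-group-1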
+ properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IP addresses, + one from IPv4 family and one from IPv6. In single stack + clusters a single IP address is expected. When omitted, + values from the status.apiServerInternalIPs will be used. + Once set, the list cannot be completely removed (but its + second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must contain at most one IPv4 + address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' + failureDomains: + description: failureDomains contains the definition of region, + zone and the vCenter topology. If this is omitted failure + domains (regions and zones) will not be used. + items: + description: VSpherePlatformFailureDomainSpec holds the + region and zone failure domain and the vCenter topology + of that failure domain. + properties: + name: + description: name defines the arbitrary but unique name + of a failure domain. + maxLength: 256 + minLength: 1 + type: string + region: + description: region defines the name of a region tag + that will be attached to a vCenter datacenter. The + tag category in vCenter must be named openshift-region. + maxLength: 80 + minLength: 1 + type: string + server: + anyOf: + - format: ipv4 + - format: ipv6 + - format: hostname + description: server is the fully-qualified domain name + or the IP address of the vCenter server. --- + maxLength: 255 + minLength: 1 + type: string + topology: + description: Topology describes a given failure domain + using vSphere constructs + properties: + computeCluster: + description: computeCluster is the absolute path of + the vCenter cluster in which virtual machine will + be located. The absolute path is of the form /<datacenter>/host/<cluster>. + The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/host/.*? + type: string + datacenter: + description: datacenter is the name of vCenter datacenter + in which virtual machines will be located. The + maximum length of the datacenter name is 80 characters. + maxLength: 80 + type: string + datastore: + description: datastore is the absolute path of the + datastore in which the virtual machine is located. + The absolute path is of the form /<datacenter>/datastore/<datastore> + The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/datastore/.*? + type: string + folder: + description: folder is the absolute path of the + folder where virtual machines are located. The + absolute path is of the form /<datacenter>/vm/<folder>. + The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/vm/.*? + type: string + networks: + description: networks is the list of port group + network names within this failure domain. Currently, + we only support a single interface per RHCOS virtual + machine.
The available networks (port groups) + can be listed using `govc ls 'network/*'` The + single interface should be the absolute path of + the form /<datacenter>/network/<portgroup>. + items: + type: string + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + resourcePool: + description: resourcePool is the absolute path of + the resource pool where virtual machines will + be created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>. + The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/host/.*?/Resources.* + type: string + template: + description: "template is the full inventory path + of the virtual machine or template that will be + cloned when creating new machines in this failure + domain. The maximum length of the path is 2048 + characters. \n When omitted, the template will + be calculated by the control plane machineset + operator based on the region and zone defined + in VSpherePlatformFailureDomainSpec. For example, + for zone=zonea, region=region1, and infrastructure + name=test, the template path would be calculated + as /<datacenter>/vm/test-rhcos-region1-zonea." + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zone: + description: zone defines the name of a zone tag that + will be attached to a vCenter cluster. The tag category + in vCenter must be named openshift-zone. + maxLength: 80 + minLength: 1 + type: string + required: + - name + - region + - server + - topology + - zone + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IP + addresses, one from IPv4 family and one from IPv6. In single + stack clusters a single IP address is expected. When omitted, + values from the status.ingressIPs will be used. Once set, + the list cannot be completely removed (but its second entry + can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 address + and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() : true' + machineNetworks: + description: machineNetworks are IP networks used to connect + all the OpenShift cluster nodes. Each network is provided + in the CIDR format and should be IPv4 or IPv6, for example + "10.0.0.0/8" or "fd00::/8". + items: + description: CIDR is an IP address range in CIDR notation + (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeNetworking: + description: nodeNetworking contains the definition of internal + and external network constraints for assigning the node's + networking.
If this field is omitted, networking defaults + to the legacy address selection behavior which is to only + support a single address and return the first one found. + properties: + external: + description: external represents the network configuration + of the node that is externally routable. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's VM for + use in the status.addresses fields. --- + items: + format: cidr + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: network VirtualMachine's VM Network names + that will be used when searching for status.addresses + fields. Note that if internal.networkSubnetCIDR + and external.networkSubnetCIDR are not set, then + the vNIC associated to this network must only have + a single IP address assigned to it. The available + networks (port groups) can be listed using `govc + ls 'network/*'` + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address on VirtualMachine's + network interfaces included in the fields' CIDRs + that will be used in respective status.addresses + fields. --- + items: + format: cidr + type: string + type: array + x-kubernetes-list-type: set + type: object + internal: + description: internal represents the network configuration + of the node that is routable only within the cluster. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's VM for + use in the status.addresses fields. --- + items: + format: cidr + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: network VirtualMachine's VM Network names + that will be used when searching for status.addresses + fields. Note that if internal.networkSubnetCIDR + and external.networkSubnetCIDR are not set, then + the vNIC associated to this network must only have + a single IP address assigned to it. The available + networks (port groups) can be listed using `govc + ls 'network/*'` + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address on VirtualMachine's + network interfaces included in the fields' CIDRs + that will be used in respective status.addresses + fields. --- + items: + format: cidr + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + vcenters: + description: vcenters holds the connection details for services + to communicate with vCenter. Currently, only a single vCenter + is supported. --- + items: + description: VSpherePlatformVCenterSpec stores the vCenter + connection fields. This is used by the vSphere CCM. + properties: + datacenters: + description: The vCenter Datacenters in which the RHCOS + vm guests are located. This field will be used by + the Cloud Controller Manager. Each datacenter listed + here should be used within a topology. + items: + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + port: + description: port is the TCP port that will be used + to communicate to the vCenter endpoint. When omitted, + this means the user has no opinion and it is up to + the platform to choose a sensible default, which is + subject to change over time.
+ format: int32 + maximum: 32767 + minimum: 1 + type: integer + server: + anyOf: + - format: ipv4 + - format: ipv6 + - format: hostname + description: server is the fully-qualified domain name + or the IP address of the vCenter server. --- + maxLength: 255 + type: string + required: + - datacenters + - server + type: object + maxItems: 1 + minItems: 0 + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + type: object + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + apiServerInternalURI: + description: apiServerInternalURL is a valid URI with scheme 'https', + address and optionally a port (defaulting to 443). apiServerInternalURL + can be used by components like kubelets, to contact the Kubernetes + API server using the infrastructure provider rather than Kubernetes + networking. + type: string + apiServerURL: + description: apiServerURL is a valid URI with scheme 'https', address + and optionally a port (defaulting to 443). apiServerURL can be + used by components like the web console to tell users where to find + the Kubernetes API. + type: string + controlPlaneTopology: + default: HighlyAvailable + description: controlPlaneTopology expresses the expectations for operands + that normally run on control nodes. The default is 'HighlyAvailable', + which represents the behavior operators have in a "normal" cluster. + The 'SingleReplica' mode will be used in single-node deployments + and the operators should not configure the operand for highly-available + operation The 'External' mode indicates that the control plane is + hosted externally to the cluster and that its components are not + visible within the cluster. + enum: + - HighlyAvailable + - SingleReplica + - External + type: string + cpuPartitioning: + default: None + description: cpuPartitioning expresses if CPU partitioning is a currently + enabled feature in the cluster. CPU Partitioning means that this + cluster can support partitioning workloads to specific CPU Sets. + Valid values are "None" and "AllNodes". When omitted, the default + value is "None". The default value of "None" indicates that no nodes + will be setup with CPU partitioning. The "AllNodes" value indicates + that all nodes have been setup with CPU partitioning, and can then + be further configured via the PerformanceProfile API. + enum: + - None + - AllNodes + type: string + etcdDiscoveryDomain: + description: 'etcdDiscoveryDomain is the domain used to fetch the + SRV records for discovering etcd servers and clients. For more info: + https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery + deprecated: as of 4.7, this field is no longer set or honored. It + will be removed in a future release.' + type: string + infrastructureName: + description: infrastructureName uniquely identifies a cluster with + a human friendly name. Once set it should not be changed. Must be + of max length 27 and must have only alphanumeric or hyphen characters. 
+ type: string + infrastructureTopology: + default: HighlyAvailable + description: 'infrastructureTopology expresses the expectations for + infrastructure services that do not run on control plane nodes, + usually indicated by a node selector for a `role` value other than + `master`. The default is ''HighlyAvailable'', which represents the + behavior operators have in a "normal" cluster. The ''SingleReplica'' + mode will be used in single-node deployments and the operators should + not configure the operand for highly-available operation NOTE: External + topology mode is not applicable for this field.' + enum: + - HighlyAvailable + - SingleReplica + type: string + platform: + description: "platform is the underlying infrastructure provider for + the cluster. \n Deprecated: Use platformStatus.type instead." + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + platformStatus: + description: platformStatus holds status information specific to the + underlying infrastructure provider. + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba + Cloud infrastructure provider. + properties: + region: + description: region specifies the region for Alibaba Cloud + resources created for the cluster. + pattern: ^[0-9A-Za-z-]+$ + type: string + resourceGroupID: + description: resourceGroupID is the ID of the resource group + for the cluster. + pattern: ^(rg-[0-9A-Za-z]+)?$ + type: string + resourceTags: + description: resourceTags is a list of additional tags to + apply to Alibaba Cloud resources created for the cluster. + items: + description: AlibabaCloudResourceTag is the set of tags + to apply to resources. + properties: + key: + description: key is the key of the tag. + maxLength: 128 + minLength: 1 + type: string + value: + description: value is the value of the tag. + maxLength: 128 + minLength: 1 + type: string + required: + - key + - value + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + required: + - region + type: object + aws: + description: AWS contains settings specific to the Amazon Web + Services infrastructure provider. + properties: + region: + description: region holds the default AWS region for new AWS + resources created by the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to + apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html + for information on tagging AWS resources. AWS supports a + maximum of 50 tags per resource. OpenShift reserves 25 tags + for its use, leaving 25 tags available for the user. + items: + description: AWSResourceTag is a tag to apply to AWS resources + created for the cluster. + properties: + key: + description: key is the key of the tag + maxLength: 128 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + value: + description: value is the value of the tag. Some AWS + services do not support empty values. Since tags are + added to resources in many services, the length of + the tag value must meet the requirements of all services.
+ maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 25 + type: array + x-kubernetes-list-type: atomic + serviceEndpoints: + description: ServiceEndpoints list contains custom endpoints + which will override default service endpoint of AWS Services. + There must be only one ServiceEndpoint for a service. + items: + description: AWSServiceEndpoint store the configuration + of a custom url to override existing defaults of AWS Services. + properties: + name: + description: name is the name of the AWS service. The + list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html + This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme + https, that overrides the default generated endpoint + for a client. This must be provided and cannot be + empty. + pattern: ^https:// + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + azure: + description: Azure contains settings specific to the Azure infrastructure + provider. + properties: + armEndpoint: + description: armEndpoint specifies a URL to use for resource + management in non-sovereign clouds such as Azure Stack. + type: string + cloudName: + description: cloudName is the name of the Azure cloud environment + which can be used to configure the Azure SDK with the appropriate + Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. + enum: + - "" + - AzurePublicCloud + - AzureUSGovernmentCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureStackCloud + type: string + networkResourceGroupName: + description: networkResourceGroupName is the Resource Group + for network resources like the Virtual Network and Subnets + used by the cluster. If empty, the value is same as ResourceGroupName. + type: string + resourceGroupName: + description: resourceGroupName is the Resource Group for new + Azure resources created for the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to + apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags + for information on tagging Azure resources. Due to limitations + on Automation, Content Delivery Network, DNS Azure resources, + a maximum of 15 tags may be applied. OpenShift reserves + 5 tags for internal use, allowing 10 tags for user configuration. + items: + description: AzureResourceTag is a tag to apply to Azure + resources created for the cluster. + properties: + key: + description: key is the key part of the tag. A tag key + can have a maximum of 128 characters and cannot be + empty. Key must begin with a letter, end with a letter, + number or underscore, and must contain only alphanumeric + characters and the following special characters `_ + . -`. + maxLength: 128 + minLength: 1 + pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + value: + description: 'value is the value part of the tag. A + tag value can have a maximum of 256 characters and + cannot be empty. Value must contain only alphanumeric + characters and the following special characters `_ + + , - . / : ; < = > ? @`.'
+ maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: resourceTags are immutable and may only be configured + during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + type: object + x-kubernetes-validations: + - message: resourceTags may only be configured during installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) + || has(oldSelf.resourceTags) && has(self.resourceTags)' + baremetal: + description: BareMetal contains settings specific to the BareMetal + platform. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes to + the default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IPs + otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used + by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used + by the cluster on BareMetal platform which can be a + user-managed or openshift-managed load balancer that + is to be used for the OpenShift API and Ingress endpoints. + When set to OpenShiftManagedDefault the static pods + in charge of API and Ingress traffic load-balancing + defined in the machine config operator will be deployed. + When set to UserManaged these static pods will not be + deployed and it is expected that the load balancer is + configured out of band by the deployer. When omitted, + this means no opinion and the platform is left to choose + a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + machineNetworks: + description: machineNetworks are IP networks used to connect + all the OpenShift cluster nodes. 
+ items: + description: CIDR is an IP address range in CIDR notation + (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. Unlike the one managed by the DNS + operator, `NodeDNSIP` provides name resolution for the nodes + themselves. There is no DNS-as-a-service for BareMetal deployments. + In order to minimize necessary changes to the datacenter + DNS, a DNS service is hosted as a static pod to serve those + hostnames to the nodes in the cluster. + type: string + type: object + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix + Metal infrastructure provider. + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the + default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. + type: string + type: object + external: + description: External contains settings specific to the generic + External infrastructure provider. + properties: + cloudControllerManager: + description: cloudControllerManager contains settings specific + to the external Cloud Controller Manager (a.k.a. CCM or + CPI). When omitted, new nodes will be not tainted and no + extra initialization from the cloud controller manager is + expected. + properties: + state: + description: "state determines whether or not an external + Cloud Controller Manager is expected to be installed + within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager + \n Valid values are \"External\", \"None\" and omitted. + When set to \"External\", new nodes will be tainted + as uninitialized when created, preventing them from + running workloads until they are initialized by the + cloud controller manager. When omitted or set to \"None\", + new nodes will be not tainted and no extra initialization + from the cloud controller manager is expected." + enum: + - "" + - External + - None + type: string + x-kubernetes-validations: + - message: state is immutable once set + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: state may not be added or removed once set + rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) + && self.state != "External") + type: object + x-kubernetes-validations: + - message: cloudControllerManager may not be added or removed + once set + rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) + gcp: + description: GCP contains settings specific to the Google Cloud + Platform infrastructure provider. + properties: + cloudLoadBalancerConfig: + default: + dnsType: PlatformDefault + description: cloudLoadBalancerConfig is a union that contains + the IP addresses of API, API-Int and Ingress Load Balancers + created on the cloud platform. 
These values would not be + populated on on-prem platforms. These Load Balancer IPs + are used to configure the in-cluster DNS instances for API, + API-Int and Ingress services. `dnsType` is expected to be + set to `ClusterHosted` when these Load Balancer IP addresses + are populated and used. + nullable: true + properties: + clusterHosted: + description: clusterHosted holds the IP addresses of API, + API-Int and Ingress Load Balancers on Cloud Platforms. + The DNS solution hosted within the cluster use these + IP addresses to provide resolution for API, API-Int + and Ingress services. + properties: + apiIntLoadBalancerIPs: + description: apiIntLoadBalancerIPs holds Load Balancer + IPs for the internal API service. These Load Balancer + IP addresses can be IPv4 and/or IPv6 addresses. + Entries in the apiIntLoadBalancerIPs must be unique. + A maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + apiLoadBalancerIPs: + description: apiLoadBalancerIPs holds Load Balancer + IPs for the API service. These Load Balancer IP + addresses can be IPv4 and/or IPv6 addresses. Could + be empty for private clusters. Entries in the apiLoadBalancerIPs + must be unique. A maximum of 16 IP addresses are + permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + ingressLoadBalancerIPs: + description: ingressLoadBalancerIPs holds IPs for + Ingress Load Balancers. These Load Balancer IP addresses + can be IPv4 and/or IPv6 addresses. Entries in the + ingressLoadBalancerIPs must be unique. A maximum + of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + type: object + dnsType: + default: PlatformDefault + description: dnsType indicates the type of DNS solution + in use within the cluster. Its default value of `PlatformDefault` + indicates that the cluster's DNS is the default provided + by the cloud platform. It can be set to `ClusterHosted` + to bypass the configuration of the cloud default DNS. + In this mode, the cluster needs to provide a self-hosted + DNS solution for the cluster's installation to succeed. + The cluster's use of the cloud's Load Balancers is unaffected + by this setting. The value is immutable after it has + been set at install time. Currently, there is no way + for the customer to add additional DNS entries into + the cluster hosted DNS. Enabling this functionality + allows the user to start their own DNS solution outside + the cluster after installation is complete. The customer + would be responsible for configuring this custom DNS + solution, and it can be run in addition to the in-cluster + DNS solution. 
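+                    # Illustrative sketch only (hypothetical addresses, not part
+                    # of the generated manifest): setting `dnsType: ClusterHosted`
+                    # together with the load balancer IPs that the in-cluster DNS
+                    # should answer with could look like:
+                    #
+                    #   cloudLoadBalancerConfig:
+                    #     dnsType: ClusterHosted
+                    #     clusterHosted:
+                    #       apiLoadBalancerIPs:
+                    #       - 203.0.113.10
+                    #       apiIntLoadBalancerIPs:
+                    #       - 10.0.0.10
+                    #       ingressLoadBalancerIPs:
+                    #       - 203.0.113.20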
+ enum: + - ClusterHosted + - PlatformDefault + type: string + x-kubernetes-validations: + - message: dnsType is immutable + rule: oldSelf == '' || self == oldSelf + type: object + x-kubernetes-validations: + - message: clusterHosted is permitted only when dnsType is + ClusterHosted + rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' + ? !has(self.clusterHosted) : true' + projectID: + description: projectID is the Project ID for new GCP + resources created for the cluster. + type: string + region: + description: region holds the region for new GCP resources + created for the cluster. + type: string + resourceLabels: + description: resourceLabels is a list of additional labels + to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources + for information on labeling GCP resources. GCP supports + a maximum of 64 labels per resource. OpenShift reserves + 32 labels for internal use, allowing 32 labels for user + configuration. + items: + description: GCPResourceLabel is a label to apply to GCP + resources created for the cluster. + properties: + key: + description: key is the key part of the label. A label + key can have a maximum of 63 characters and cannot + be empty. Label key must begin with a lowercase letter, + and must contain only lowercase letters, numeric characters, + and the following special characters `_-`. Label key + must not have the reserved prefixes `kubernetes-io` + and `openshift-io`. + maxLength: 63 + minLength: 1 + pattern: ^[a-z][0-9a-z_-]{0,62}$ + type: string + x-kubernetes-validations: + - message: label keys must not start with either `openshift-io` + or `kubernetes-io` + rule: '!self.startsWith(''openshift-io'') && !self.startsWith(''kubernetes-io'')' + value: + description: value is the value part of the label. A + label value can have a maximum of 63 characters and + cannot be empty. Value must contain only lowercase + letters, numeric characters, and the following special + characters `_-`. + maxLength: 63 + minLength: 1 + pattern: ^[0-9a-z_-]{1,63}$ + type: string + required: + - key + - value + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceLabels are immutable and may only be configured + during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + resourceTags: + description: resourceTags is a list of additional tags to + apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview + for information on tagging GCP resources. GCP supports a + maximum of 50 tags per resource. + items: + description: GCPResourceTag is a tag to apply to GCP resources + created for the cluster. + properties: + key: + description: key is the key part of the tag. A tag key + can have a maximum of 63 characters and cannot be + empty. Tag key must begin and end with an alphanumeric + character, and must contain only uppercase, lowercase + alphanumeric characters, and the following special + characters `._-`. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$ + type: string + parentID: + description: 'parentID is the ID of the hierarchical + resource where the tags are defined, e.g. at the Organization
To find the Organization or + Project ID, refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, + https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. + An OrganizationID must consist of decimal numbers + and cannot have leading zeroes. A ProjectID must be + 6 to 30 characters in length, may contain only lowercase + letters, numbers, and hyphens, must start with + a letter, and cannot end with a hyphen.' + maxLength: 32 + minLength: 1 + pattern: (^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$) + type: string + value: + description: value is the value part of the tag. A tag + value can have a maximum of 63 characters and cannot + be empty. Tag value must begin and end with an alphanumeric + character, and must contain only uppercase and lowercase + alphanumeric characters, and the following special + characters `_-.@%=+:,*#&(){}[]` and spaces. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$ + type: string + required: + - key + - parentID + - value + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceTags are immutable and may only be configured + during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + type: object + x-kubernetes-validations: + - message: resourceLabels may only be configured during installation + rule: '!has(oldSelf.resourceLabels) && !has(self.resourceLabels) + || has(oldSelf.resourceLabels) && has(self.resourceLabels)' + - message: resourceTags may only be configured during installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) + || has(oldSelf.resourceTags) && has(self.resourceTags)' + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud + infrastructure provider. + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet + Services instance managing the DNS zone for the cluster's + base domain. + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS Services + instance managing the DNS zone for the cluster's base domain. + type: string + location: + description: Location is where the cluster has been deployed. + type: string + providerType: + description: ProviderType indicates the type of cluster that + was created. + type: string + resourceGroupName: + description: ResourceGroupName is the Resource Group for new + IBMCloud resources created for the cluster. + type: string + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints + which will override the default service endpoints of an + IBM Cloud service. These endpoints are consumed by components + within the cluster to reach the respective IBM Cloud Services. + items: + description: IBMCloudServiceEndpoint stores the configuration + of a custom url to override existing defaults of IBM Cloud + Services. + properties: + name: + description: 'name is the name of the IBM Cloud service. + Possible values are: CIS, COS, DNSServices, GlobalSearch, + GlobalTagging, HyperProtect, IAM, KeyProtect, ResourceController, + ResourceManager, or VPC.
For example, the IBM Cloud + Private IAM service could be configured with the service + `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com`, + whereas the IBM Cloud Private VPC service for US South + (Dallas) could be configured with the service `name` + of `VPC` and `url` of `https://us.south.private.iaas.cloud.ibm.com`.' + enum: + - CIS + - COS + - DNSServices + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - ResourceController + - ResourceManager + - VPC + type: string + url: + description: url is a fully qualified URI with scheme + https that overrides the default generated endpoint + for a client. This must be provided and cannot be + empty. + type: string + x-kubernetes-validations: + - message: url must be a valid absolute URL + rule: isURL(self) + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt + infrastructure provider. + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the + default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. + type: string + type: object + nutanix: + description: Nutanix contains settings specific to the Nutanix + infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes to + the default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IPs + otherwise only one.
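# Illustrative sketch (not part of the generated manifest): for the
# dual-stack case described above, each list would carry one IPv4 and one
# IPv6 address. The values are documentation-range placeholders.
#
#   apiServerInternalIPs:
#     - 192.0.2.10
#     - 2001:db8::10
#   ingressIPs:
#     - 192.0.2.20
#     - 2001:db8::20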
+ format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used + by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used + by the cluster on Nutanix platform which can be a user-managed + or openshift-managed load balancer that is to be used + for the OpenShift API and Ingress endpoints. When set + to OpenShiftManagedDefault the static pods in charge + of API and Ingress traffic load-balancing defined in + the machine config operator will be deployed. When set + to UserManaged these static pods will not be deployed + and it is expected that the load balancer is configured + out of band by the deployer. When omitted, this means + no opinion and the platform is left to choose a reasonable + default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + type: object + openstack: + description: OpenStack contains settings specific to the OpenStack + infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + cloudName: + description: cloudName is the name of the desired OpenStack + cloud in the client configuration file (`clouds.yaml`). + type: string + ingressIP: + description: "ingressIP is an external IP which routes to + the default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IPs + otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used + by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used + by the cluster on OpenStack platform which can be a + user-managed or openshift-managed load balancer that + is to be used for the OpenShift API and Ingress endpoints. 
+ When set to OpenShiftManagedDefault the static pods + in charge of API and Ingress traffic load-balancing + defined in the machine config operator will be deployed. + When set to UserManaged these static pods will not be + deployed and it is expected that the load balancer is + configured out of band by the deployer. When omitted, + this means no opinion and the platform is left to choose + a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + machineNetworks: + description: machineNetworks are IP networks used to connect + all the OpenShift cluster nodes. + items: + description: CIDR is an IP address range in CIDR notation + (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. Unlike the one managed by the DNS + operator, `NodeDNSIP` provides name resolution for the nodes + themselves. There is no DNS-as-a-service for OpenStack deployments. + In order to minimize necessary changes to the datacenter + DNS, a DNS service is hosted as a static pod to serve those + hostnames to the nodes in the cluster. + type: string + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure + provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes to + the default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IPs + otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used + by the cluster is configured. 
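# Illustrative sketch (not part of the generated manifest): opting out of
# the openshift-managed load balancer described above, so that API and
# Ingress load balancing is configured out of band by the deployer.
#
#   loadBalancer:
#     type: UserManaged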
+ properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used + by the cluster on Ovirt platform which can be a user-managed + or openshift-managed load balancer that is to be used + for the OpenShift API and Ingress endpoints. When set + to OpenShiftManagedDefault the static pods in charge + of API and Ingress traffic load-balancing defined in + the machine config operator will be deployed. When set + to UserManaged these static pods will not be deployed + and it is expected that the load balancer is configured + out of band by the deployer. When omitted, this means + no opinion and the platform is left to choose a reasonable + default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + nodeDNSIP: + description: 'deprecated: as of 4.6, this field is no longer + set or honored. It will be removed in a future release.' + type: string + type: object + powervs: + description: PowerVS contains settings specific to the Power Systems + Virtual Servers infrastructure provider. + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet + Services instance managing the DNS zone for the cluster's + base domain. + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS Services + instance managing the DNS zone for the cluster's base domain. + type: string + region: + description: region holds the default Power VS region for + new Power VS resources created by the cluster. + type: string + resourceGroup: + description: 'resourceGroup is the resource group name for + new IBMCloud resources created for a cluster. The resource + group specified here will be used by cluster-image-registry-operator + to set up a COS Instance in IBMCloud for the cluster registry. + More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. + When omitted, the image registry operator won''t be able + to configure storage, which results in the image registry + cluster operator not being in an available state.' + maxLength: 40 + pattern: ^[a-zA-Z0-9-_ ]+$ + type: string + x-kubernetes-validations: + - message: resourceGroup is immutable once set + rule: oldSelf == '' || self == oldSelf + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints + which will override the default service endpoints of a Power + VS service. + items: + description: PowervsServiceEndpoint stores the configuration + of a custom url to override existing defaults of PowerVS + Services. + properties: + name: + description: name is the name of the Power VS service. + A few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api + ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller + Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is a fully qualified URI with scheme + https that overrides the default generated endpoint + for a client. This must be provided and cannot be + empty. + format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + zone: + description: 'zone holds the default zone for the new Power + VS resources created by the cluster.
Note: Currently only + single-zone OCP clusters are supported' + type: string + type: object + x-kubernetes-validations: + - message: cannot unset resourceGroup once set + rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' + type: + description: "type is the underlying infrastructure provider for + the cluster. This value controls whether infrastructure automation + such as service load balancers, dynamic volume provisioning, + machine creation and deletion, and other integrations are enabled. + If None, no infrastructure automation is enabled. Allowed values + are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", + \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", + \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components + may not support all platforms, and must handle unrecognized + platforms as None if they do not support that platform. \n This + value will be synced to the `status.platform` and `status.platformStatus.type`. + Currently this value cannot be changed once set." + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + vsphere: + description: VSphere contains settings specific to the VSphere + infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. It is the IP that the + Infrastructure.status.apiServerInternalURI points to. It + is the IP for a self-hosted load balancer in front of the + API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to + contact the Kubernetes API server that can be used by components + inside the cluster, like kubelets using the infrastructure + rather than Kubernetes networking. These are the IPs for + a self-hosted load balancer in front of the API servers. + In dual stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes to + the default ingress controller. The IP is a suitable target + of a wildcard DNS record used to resolve default route host + names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to + the default ingress controller. The IPs are suitable targets + of a wildcard DNS record used to resolve default route host + names. In dual stack clusters this list contains two IPs + otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used + by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used + by the cluster on VSphere platform which can be a user-managed + or openshift-managed load balancer that is to be used + for the OpenShift API and Ingress endpoints.
When set + to OpenShiftManagedDefault the static pods in charge + of API and Ingress traffic load-balancing defined in + the machine config operator will be deployed. When set + to UserManaged these static pods will not be deployed + and it is expected that the load balancer is configured + out of band by the deployer. When omitted, this means + no opinion and the platform is left to choose a reasonable + default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + machineNetworks: + description: machineNetworks are IP networks used to connect + all the OpenShift cluster nodes. + items: + description: CIDR is an IP address range in CIDR notation + (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. Unlike the one managed by the DNS + operator, `NodeDNSIP` provides name resolution for the nodes + themselves. There is no DNS-as-a-service for vSphere deployments. + In order to minimize necessary changes to the datacenter + DNS, a DNS service is hosted as a static pod to serve those + hostnames to the nodes in the cluster. + type: string + type: object + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_networks-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_networks-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..3acf199056 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_networks-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,433 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: networks.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Network holds cluster-wide information about Network. The canonical + name is `cluster`. It is used to configure the desired network configuration, + such as: IP address pools for services/pod IPs, network plugin, etc. Please + view network.spec for an explanation on what applies when configuring this + resource. \n Compatibility level 1: Stable within a major release for a + minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration. As a general + rule, this SHOULD NOT be read directly. Instead, you should consume + the NetworkStatus, as it indicates the currently deployed configuration. + Currently, most spec fields are immutable after installation. Please + view the individual ones for further details on each. + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. This field is immutable + after installation. + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses + from which pod IPs are allocated. + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each + node. If this field is not used by the plugin, it can be left + unset. + format: int32 + minimum: 0 + type: integer + type: object + type: array + externalIP: + description: externalIP defines configuration for controllers that + affect Service.ExternalIP. If nil, then ExternalIP is not allowed + to be set. + properties: + autoAssignCIDRs: + description: autoAssignCIDRs is a list of CIDRs from which to + automatically assign Service.ExternalIP. These are assigned + when the service is of type LoadBalancer. In general, this is + only useful for bare-metal clusters. In OpenShift 3.x, this + was misleadingly called "IngressIPs". Automatically assigned + External IPs are not affected by any ExternalIPPolicy rules. + Currently, only one entry may be provided. + items: + type: string + type: array + policy: + description: policy is a set of restrictions applied to the ExternalIP + field. If nil or empty, then ExternalIP is not allowed to be + set. + properties: + allowedCIDRs: + description: allowedCIDRs is the list of allowed CIDRs. + items: + type: string + type: array + rejectedCIDRs: + description: rejectedCIDRs is the list of disallowed CIDRs. + These take precedence over allowedCIDRs. + items: + type: string + type: array + type: object + type: object + networkDiagnostics: + description: "networkDiagnostics defines network diagnostics configuration. + \n Takes precedence over spec.disableNetworkDiagnostics in network.operator.openshift.io. + If networkDiagnostics is not specified or is empty, and the spec.disableNetworkDiagnostics + flag in network.operator.openshift.io is set to true, the network + diagnostics feature will be disabled." + properties: + mode: + description: "mode controls the network diagnostics mode \n When + omitted, this means the user has no opinion and the platform + is left to choose reasonable defaults. These defaults are subject + to change over time. The current default is All." + enum: + - "" + - All + - Disabled + type: string + sourcePlacement: + description: "sourcePlacement controls the scheduling of network + diagnostics source deployment \n See NetworkDiagnosticsSourcePlacement + for more details about default values."
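# Illustrative sketch (not part of the generated manifest): a
# networkDiagnostics stanza pinning the source deployment to Linux infra
# nodes; the node-role label is a hypothetical example.
#
#   networkDiagnostics:
#     mode: All
#     sourcePlacement:
#       nodeSelector:
#         kubernetes.io/os: linux
#         node-role.kubernetes.io/infra: ""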
+ properties: + nodeSelector: + additionalProperties: + type: string + description: "nodeSelector is the node selector applied to + network diagnostics components \n When omitted, this means + the user has no opinion and the platform is left to choose + reasonable defaults. These defaults are subject to change + over time. The current default is `kubernetes.io/os: linux`." + type: object + tolerations: + description: "tolerations is a list of tolerations applied + to network diagnostics components \n When omitted, this + means the user has no opinion and the platform is left to + choose reasonable defaults. These defaults are subject to + change over time. The current default is an empty list." + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple <key,value,effect> using + the matching operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute. + type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + targetPlacement: + description: "targetPlacement controls the scheduling of network + diagnostics target daemonset \n See NetworkDiagnosticsTargetPlacement + for more details about default values." + properties: + nodeSelector: + additionalProperties: + type: string + description: "nodeSelector is the node selector applied to + network diagnostics components \n When omitted, this means + the user has no opinion and the platform is left to choose + reasonable defaults. These defaults are subject to change + over time. The current default is `kubernetes.io/os: linux`." + type: object + tolerations: + description: "tolerations is a list of tolerations applied + to network diagnostics components \n When omitted, this + means the user has no opinion and the platform is left to + choose reasonable defaults. These defaults are subject to + change over time. The current default is `- operator: \"Exists\"` + which means that all taints are tolerated." + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple <key,value,effect> using + the matching operator <operator>. + properties: + effect: + description: Effect indicates the taint effect to match. + Empty means match all taint effects. When specified, + allowed values are NoSchedule, PreferNoSchedule and + NoExecute.
+ type: string + key: + description: Key is the taint key that the toleration + applies to. Empty means match all taint keys. If the + key is empty, operator must be Exists; this combination + means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship + to the value. Valid operators are Exists and Equal. + Defaults to Equal. Exists is equivalent to wildcard + for value, so that a pod can tolerate all taints of + a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period + of time the toleration (which must be of effect NoExecute, + otherwise this field is ignored) tolerates the taint. + By default, it is not set, which means tolerate the + taint forever (do not evict). Zero and negative values + will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration + matches to. If the operator is Exists, the value should + be empty, otherwise just a regular string. + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + networkType: + description: 'NetworkType is the plugin that is to be deployed (e.g. + OpenShiftSDN). This should match a value that the cluster-network-operator + understands, or else no networking will be installed. Currently + supported values are: - OpenShiftSDN This field is immutable after + installation.' + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support + a single entry here. This field is immutable after installation. + items: + type: string + type: array + serviceNodePortRange: + description: The port range allowed for Services of type NodePort. + If not specified, the default of 30000-32767 will be used. Such + Services without a NodePort specified will have one automatically + allocated from this range. This parameter can be updated after the + cluster is installed. + pattern: ^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])-([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$ + type: string + type: object + x-kubernetes-validations: + - message: cannot set networkDiagnostics.sourcePlacement and networkDiagnostics.targetPlacement + when networkDiagnostics.mode is Disabled + rule: '!has(self.networkDiagnostics) || !has(self.networkDiagnostics.mode) + || self.networkDiagnostics.mode!=''Disabled'' || !has(self.networkDiagnostics.sourcePlacement) + && !has(self.networkDiagnostics.targetPlacement)' + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses + from which pod IPs are allocated. + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each + node. If this field is not used by the plugin, it can be left + unset. + format: int32 + minimum: 0 + type: integer + type: object + type: array + clusterNetworkMTU: + description: ClusterNetworkMTU is the MTU for inter-pod networking. + type: integer + conditions: + description: 'conditions represents the observations of a network.config + current state. 
Known .status.conditions.type are: "NetworkTypeMigrationInProgress", + "NetworkTypeMigrationMTUReady", "NetworkTypeMigrationTargetCNIAvailable", + "NetworkTypeMigrationTargetCNIInUse", "NetworkTypeMigrationOriginalCNIPurged" + and "NetworkDiagnosticsAvailable"' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + migration: + description: Migration contains the cluster network migration configuration. + properties: + mtu: + description: MTU contains the MTU migration configuration. + properties: + machine: + description: Machine contains MTU migration configuration + for the machine's uplink. + properties: + from: + description: From is the MTU to migrate from. + format: int32 + minimum: 0 + type: integer + to: + description: To is the MTU to migrate to. 
+ format: int32 + minimum: 0 + type: integer + type: object + network: + description: Network contains MTU migration configuration + for the default network. + properties: + from: + description: From is the MTU to migrate from. + format: int32 + minimum: 0 + type: integer + to: + description: To is the MTU to migrate to. + format: int32 + minimum: 0 + type: integer + type: object + type: object + networkType: + description: 'NetworkType is the target plugin that is to be deployed. + Currently supported values are: OpenShiftSDN, OVNKubernetes' + enum: + - OpenShiftSDN + - OVNKubernetes + type: string + type: object + networkType: + description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support + a single entry here. + items: + type: string + type: array + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..b18926fed6 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.crd-manifests/0000_10_config-operator_01_schedulers-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,130 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: schedulers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Scheduler + listKind: SchedulerList + plural: schedulers + singular: scheduler + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Scheduler holds cluster-wide config information to run the Kubernetes + Scheduler and influence its placement decisions. The canonical name for + this config is `cluster`. \n Compatibility level 1: Stable within a major + release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + defaultNodeSelector: + description: 'defaultNodeSelector helps set the cluster-wide default + node selector to restrict pod placement to specific nodes. 
This + is applied to the pods created in all namespaces and creates an + intersection with any existing nodeSelectors already set on a pod, + additionally constraining that pod''s selector. For example, defaultNodeSelector: + "type=user-node,region=east" would set the nodeSelector field in the pod + spec to "type=user-node,region=east" for all pods created in all + namespaces. Namespaces having project-wide node selectors won''t + be impacted even if this field is set. This adds an annotation section + to the namespace. For example, if a new namespace is created with + node-selector=''type=user-node,region=east'', the annotation openshift.io/node-selector: + type=user-node,region=east gets added to the project. When the openshift.io/node-selector + annotation is set on the project, that value is used in preference + to the value set in the defaultNodeSelector field. For instance, + openshift.io/node-selector: "type=user-node,region=west" means that + the default of "type=user-node,region=east" set in defaultNodeSelector + would not be applied.' + type: string + mastersSchedulable: + description: 'MastersSchedulable allows master nodes to be schedulable. + When this flag is turned on, all the master nodes in the cluster + will be made schedulable, so that workload pods can run on them. + The default value for this field is false, meaning none of the master + nodes are schedulable. Important Note: Once the workload pods start + running on the master nodes, extreme care must be taken to ensure + that cluster-critical control plane components are not impacted. + Please turn on this field after doing due diligence.' + type: boolean + policy: + description: 'DEPRECATED: the scheduler Policy API has been deprecated + and will be removed in a future release. policy is a reference to + a ConfigMap containing scheduler policy which has user specified + predicates and priorities. If this ConfigMap is not available, the scheduler + will default to using DefaultAlgorithmProvider. The namespace for + this configmap is openshift-config.' + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + required: + - name + type: object + profile: + description: "profile sets which scheduling profile should be set + in order to configure scheduling decisions for new pods. \n Valid + values are \"LowNodeUtilization\", \"HighNodeUtilization\", \"NoScoring\". + Defaults to \"LowNodeUtilization\"." + enum: + - "" + - LowNodeUtilization + - HighNodeUtilization + - NoScoring + type: string + profileCustomizations: + description: profileCustomizations contains configuration for modifying + the default behavior of existing scheduler profiles. + properties: + dynamicResourceAllocation: + description: dynamicResourceAllocation allows enabling or disabling + dynamic resource allocation within the scheduler. Dynamic resource + allocation is an API for requesting and sharing resources between + pods and containers inside a pod. Third-party resource drivers + are responsible for tracking and allocating resources. Different + kinds of resources support arbitrary parameters for defining + requirements and initialization. Valid values are Enabled, Disabled + and omitted. When omitted, this means no opinion and the platform + is left to choose a reasonable default, which is subject to + change over time. The current default is Disabled. + enum: + - "" + - Enabled + - Disabled + type: string + type: object + type: object + status: + description: status holds observed values from the cluster.
They may not + be overridden. + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..2e3adb645b --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_backups-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,142 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1482 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: backups.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "Backup provides configuration for performing backups of the + openshift cluster. \n Compatibility level 4: No compatibility is provided, + the API can change at any point for any reason. These capabilities should + not be used by applications needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + etcd: + description: etcd specifies the configuration for periodic backups + of the etcd cluster. + properties: + pvcName: + description: PVCName specifies the name of the PersistentVolumeClaim + (PVC) which binds a PersistentVolume where the etcd backup files + would be saved. The PVC itself must always be created in the + "openshift-etcd" namespace. If the PVC is left unspecified ("") + then the platform will choose a reasonable default location + to save the backup. In the future this may mean backups saved + across the control-plane nodes. + type: string + retentionPolicy: + description: RetentionPolicy defines the retention policy for + retaining and deleting existing backups. + properties: + retentionNumber: + description: RetentionNumber configures the retention policy + based on the number of backups + properties: + maxNumberOfBackups: + description: MaxNumberOfBackups defines the maximum number + of backups to retain. If the existing number of backups + saved is equal to MaxNumberOfBackups then the oldest + backup will be removed before a new backup is initiated.
+ minimum: 1 + type: integer + required: + - maxNumberOfBackups + type: object + retentionSize: + description: RetentionSize configures the retention policy + based on the size of backups + properties: + maxSizeOfBackupsGb: + description: MaxSizeOfBackupsGb defines the total size + in GB of backups to retain. If the current total size + of backups exceeds MaxSizeOfBackupsGb then the oldest backup + will be removed before a new backup is initiated. + minimum: 1 + type: integer + required: + - maxSizeOfBackupsGb + type: object + retentionType: + allOf: + - enum: + - RetentionNumber + - RetentionSize + - enum: + - "" + - RetentionNumber + - RetentionSize + description: RetentionType sets the type of retention policy. + Currently, the only valid policies are retention by number + of backups (RetentionNumber) and by the size of backups (RetentionSize). + More policies or types may be added in the future. Empty + string means no opinion and the platform is left to choose + a reasonable default which is subject to change without + notice. The current default is RetentionNumber with 15 backups + kept. + type: string + required: + - retentionType + type: object + schedule: + description: 'Schedule defines the recurring backup schedule in + Cron format, for example every 2 hours: "0 */2 * * *", or every day at 3am: "0 3 + * * *". Empty string means no opinion and the platform is left + to choose a reasonable default which is subject to change without + notice. The current default is "no backups", but this will change + in the future.' + pattern: ^(@(annually|yearly|monthly|weekly|daily|hourly))|(\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) + (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) + (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) + (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) + (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*))$ + type: string + timeZone: + description: The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + If not specified, this will default to the time zone of the + kube-controller-manager process. See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones + pattern: ^([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(\/[A-Za-z_]+){1,2})(\/GMT[+-]\d{1,2})?$ + type: string + type: object + required: + - etcd + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden.
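# Illustrative sketch (not part of the generated manifest): a Backup
# resource matching the schema above, taking an etcd backup every day at
# 3am UTC and keeping at most 15 backups. The PVC name is a hypothetical
# placeholder for a claim in the openshift-etcd namespace.
#
#   apiVersion: config.openshift.io/v1alpha1
#   kind: Backup
#   metadata:
#     name: default
#   spec:
#     etcd:
#       schedule: "0 3 * * *"
#       timeZone: "UTC"
#       pvcName: etcd-backup-pvc
#       retentionPolicy:
#         retentionType: RetentionNumber
#         retentionNumber:
#           maxNumberOfBackups: 15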
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clusterimagepolicies-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clusterimagepolicies-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..79c49e0580 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_clusterimagepolicies-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,398 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1457 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: clusterimagepolicies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ClusterImagePolicy + listKind: ClusterImagePolicyList + plural: clusterimagepolicies + singular: clusterimagepolicy + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "ClusterImagePolicy holds cluster-wide configuration for image + signature verification \n Compatibility level 4: No compatibility is provided, + the API can change at any point for any reason. These capabilities should + not be used by applications needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec contains the configuration for the cluster image policy. + properties: + policy: + description: policy contains configuration to allow scopes to be verified, + and defines how images not matching the verification policy will + be treated. + properties: + rootOfTrust: + description: rootOfTrust specifies the root of trust for the policy. + properties: + fulcioCAWithRekor: + description: 'fulcioCAWithRekor defines the root of trust + based on the Fulcio certificate and the Rekor public key. + For more information about Fulcio and Rekor, please refer + to the document at: https://github.com/sigstore/fulcio and + https://github.com/sigstore/rekor' + properties: + fulcioCAData: + description: fulcioCAData contains inline base64-encoded + data for the PEM format fulcio CA. fulcioCAData must + be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + fulcioSubject: + description: fulcioSubject specifies OIDC issuer and the + email of the Fulcio authentication configuration. + properties: + oidcIssuer: + description: 'oidcIssuer contains the expected OIDC + issuer. 
It will be verified that the Fulcio-issued + certificate contains a (Fulcio-defined) certificate + extension pointing at this OIDC issuer URL. When + Fulcio issues certificates, it includes a value + based on a URL inside the client-provided ID token. + Example: "https://expected.OIDC.issuer/"' + type: string + x-kubernetes-validations: + - message: oidcIssuer must be a valid URL + rule: isURL(self) + signedEmail: + description: 'signedEmail holds the email address + the Fulcio certificate is issued for. Example: + "expected-signing-user@example.com"' + type: string + x-kubernetes-validations: + - message: invalid email address + rule: self.matches('^\\S+@\\S+$') + required: + - oidcIssuer + - signedEmail + type: object + rekorKeyData: + description: rekorKeyData contains inline base64-encoded + data for the PEM format from the Rekor public key. rekorKeyData + must be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + required: + - fulcioCAData + - fulcioSubject + - rekorKeyData + type: object + policyType: + description: policyType serves as the union's discriminator. + Users are required to assign a value to this field, choosing + one of the policy types that define the root of trust. "PublicKey" + indicates that the policy relies on a sigstore publicKey + and may optionally use a Rekor verification. "FulcioCAWithRekor" + indicates that the policy is based on the Fulcio certification + and incorporates a Rekor verification. + enum: + - PublicKey + - FulcioCAWithRekor + type: string + publicKey: + description: publicKey defines the root of trust based on + a sigstore public key. + properties: + keyData: + description: keyData contains inline base64-encoded data + for the PEM format public key. KeyData must be at most + 8192 characters. + format: byte + maxLength: 8192 + type: string + rekorKeyData: + description: rekorKeyData contains inline base64-encoded + data for the PEM format from the Rekor public key. rekorKeyData + must be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + required: + - keyData + type: object + required: + - policyType + type: object + x-kubernetes-validations: + - message: publicKey is required when policyType is PublicKey, + and forbidden otherwise + rule: 'has(self.policyType) && self.policyType == ''PublicKey'' + ? has(self.publicKey) : !has(self.publicKey)' + - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, + and forbidden otherwise + rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' + ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' + signedIdentity: + description: signedIdentity specifies what image identity the + signature claims about the image. The required matchPolicy field + specifies the approach used in the verification process to verify + the identity in the signature and the actual image identity; + the default matchPolicy is "MatchRepoDigestOrExact". + properties: + exactRepository: + description: exactRepository is required if matchPolicy is + set to "ExactRepository". + properties: + repository: + description: repository is the reference of the image + identity to be matched. The value should be a repository + name (by omitting the tag or digest) in a registry implementing + the "Docker Registry HTTP API V2".
For example, docker.io/library/busybox + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? + self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + required: + - repository + type: object + matchPolicy: + description: matchPolicy sets the type of matching to be used. + Valid values are "MatchRepoDigestOrExact", "MatchRepository", + "ExactRepository", "RemapIdentity". When omitted, the default + value is "MatchRepoDigestOrExact". If set matchPolicy to + ExactRepository, then the exactRepository must be specified. + If set matchPolicy to RemapIdentity, then the remapIdentity + must be specified. "MatchRepoDigestOrExact" means that the + identity in the signature must be in the same repository + as the image identity if the image identity is referenced + by a digest. Otherwise, the identity in the signature must + be the same as the image identity. "MatchRepository" means + that the identity in the signature must be in the same repository + as the image identity. "ExactRepository" means that the + identity in the signature must be in the same repository + as a specific identity specified by "repository". "RemapIdentity" + means that the signature must be in the same as the remapped + image identity. Remapped image identity is obtained by replacing + the "prefix" with the specified “signedPrefix” if the the + image identity matches the specified remapPrefix. + enum: + - MatchRepoDigestOrExact + - MatchRepository + - ExactRepository + - RemapIdentity + type: string + remapIdentity: + description: remapIdentity is required if matchPolicy is set + to "RemapIdentity". + properties: + prefix: + description: prefix is the prefix of the image identity + to be matched. If the image identity matches the specified + prefix, that prefix is replaced by the specified “signedPrefix” + (otherwise it is used as unchanged and no remapping + takes place). This useful when verifying signatures + for a mirror of some other repository namespace that + preserves the vendor’s repository structure. The prefix + and signedPrefix values can be either host[:port] values + (matching exactly the same host[:port], string), repository + namespaces, or repositories (i.e. they must not contain + tags/digests), and match as prefixes of the fully expanded + form. For example, docker.io/library/busybox (not busybox) + to specify that single repository, or docker.io/library + (not an empty string) to specify the parent namespace + of docker.io/library/busybox. + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? 
+ self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + signedPrefix: + description: signedPrefix is the prefix of the image identity + to be matched in the signature. The format is the same + as "prefix". The values can be either host[:port] values + (matching exactly the same host[:port], string), repository + namespaces, or repositories (i.e. they must not contain + tags/digests), and match as prefixes of the fully expanded + form. For example, docker.io/library/busybox (not busybox) + to specify that single repository, or docker.io/library + (not an empty string) to specify the parent namespace + of docker.io/library/busybox. + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? + self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + required: + - prefix + - signedPrefix + type: object + required: + - matchPolicy + type: object + x-kubernetes-validations: + - message: exactRepository is required when matchPolicy is ExactRepository, + and forbidden otherwise + rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') + ? has(self.exactRepository) : !has(self.exactRepository)' + - message: remapIdentity is required when matchPolicy is RemapIdentity, + and forbidden otherwise + rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') + ? has(self.remapIdentity) : !has(self.remapIdentity)' + required: + - rootOfTrust + type: object + scopes: + description: 'scopes defines the list of image identities assigned + to a policy. Each item refers to a scope in a registry implementing + the "Docker Registry HTTP API V2". Scopes matching individual images + are named Docker references in the fully expanded form, either using + a tag or digest. For example, docker.io/library/busybox:latest (not + busybox:latest). More general scopes are prefixes of individual-image + scopes, and specify a repository (by omitting the tag or digest), + a repository namespace, or a registry host (by only specifying the + host name and possibly a port number) or a wildcard expression starting + with `*.`, for matching all subdomains (not including a port number). + Wildcards are only supported for subdomain matching, and may not + be used in the middle of the host, i.e. *.example.com is a valid + case, but example*.*.com is not. Please be aware that the scopes + should not be nested under the repositories of OpenShift Container + Platform images. If configured, the policies for OpenShift Container + Platform repositories will not be in effect. 
For additional details + about the format, please refer to the document explaining the docker + transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker' + items: + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid image scope format, scope must contain a fully + qualified domain name or 'localhost' + rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] + == ''localhost'' : true' + - message: invalid image scope with wildcard, a wildcard can only + be at the start of the domain and is only supported for subdomain + matching, not path matching + rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') + : true' + - message: invalid repository namespace or image specification in + the image scope + rule: '!self.contains(''*'') ? self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') + : true' + maxItems: 256 + type: array + x-kubernetes-list-type: set + required: + - policy + - scopes + type: object + status: + description: status contains the observed state of the resource. + properties: + conditions: + description: conditions provide details on the status of this API + Resource. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_imagepolicies-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_imagepolicies-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..38d721a9ab --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_imagepolicies-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,398 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1457 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: imagepolicies.config.openshift.io +spec: + group: config.openshift.io + names: + kind: ImagePolicy + listKind: ImagePolicyList + plural: imagepolicies + singular: imagepolicy + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "ImagePolicy holds namespace-wide configuration for image signature + verification \n Compatibility level 4: No compatibility is provided, the + API can change at any point for any reason. These capabilities should not + be used by applications needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + policy: + description: policy contains configuration to allow scopes to be verified, + and defines how images not matching the verification policy will + be treated. 
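The cluster-scoped CRD just defined is easiest to read from a concrete object; the namespaced ImagePolicy whose schema continues below shares the same policy and scopes structure. A minimal sketch of a conforming ClusterImagePolicy follows; the object name, registry names, and key data are hypothetical placeholders, not values from this patch:

# --- illustrative example, not part of the vendored manifest ---
apiVersion: config.openshift.io/v1alpha1
kind: ClusterImagePolicy
metadata:
  name: example-sigstore-policy      # hypothetical name
spec:
  scopes:
    - example.com/ns/app             # repository-namespace scope
    - "*.mirror.example.com"         # wildcard scope: subdomain matching only
  policy:
    rootOfTrust:
      # policyType is the union discriminator; the CEL rules above require
      # exactly the member matching its value (publicKey here) to be set.
      policyType: PublicKey
      publicKey:
        keyData: TFMwdExTMUNSVWRKVGc9PQ==   # base64-encoded PEM, placeholder
    signedIdentity:
      # RemapIdentity requires remapIdentity, per the CEL rules above.
      matchPolicy: RemapIdentity
      remapIdentity:
        prefix: mirror.example.com/vendor
        signedPrefix: registry.example.com
# --- end example ---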
+ properties: + rootOfTrust: + description: rootOfTrust specifies the root of trust for the policy. + properties: + fulcioCAWithRekor: + description: 'fulcioCAWithRekor defines the root of trust + based on the Fulcio certificate and the Rekor public key. + For more information about Fulcio and Rekor, please refer + to the document at: https://github.com/sigstore/fulcio and + https://github.com/sigstore/rekor' + properties: + fulcioCAData: + description: fulcioCAData contains inline base64-encoded + data for the PEM format fulcio CA. fulcioCAData must + be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + fulcioSubject: + description: fulcioSubject specifies OIDC issuer and the + email of the Fulcio authentication configuration. + properties: + oidcIssuer: + description: 'oidcIssuer contains the expected OIDC + issuer. It will be verified that the Fulcio-issued + certificate contains a (Fulcio-defined) certificate + extension pointing at this OIDC issuer URL. When + Fulcio issues certificates, it includes a value + based on a URL inside the client-provided ID token. + Example: "https://expected.OIDC.issuer/"' + type: string + x-kubernetes-validations: + - message: oidcIssuer must be a valid URL + rule: isURL(self) + signedEmail: + description: 'signedEmail holds the email address + the Fulcio certificate is issued for. Example: + "expected-signing-user@example.com"' + type: string + x-kubernetes-validations: + - message: invalid email address + rule: self.matches('^\\S+@\\S+$') + required: + - oidcIssuer + - signedEmail + type: object + rekorKeyData: + description: rekorKeyData contains inline base64-encoded + data for the PEM format from the Rekor public key. rekorKeyData + must be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + required: + - fulcioCAData + - fulcioSubject + - rekorKeyData + type: object + policyType: + description: policyType serves as the union's discriminator. + Users are required to assign a value to this field, choosing + one of the policy types that define the root of trust. "PublicKey" + indicates that the policy relies on a sigstore publicKey + and may optionally use a Rekor verification. "FulcioCAWithRekor" + indicates that the policy is based on the Fulcio certification + and incorporates a Rekor verification. + enum: + - PublicKey + - FulcioCAWithRekor + type: string + publicKey: + description: publicKey defines the root of trust based on + a sigstore public key. + properties: + keyData: + description: keyData contains inline base64-encoded data + for the PEM format public key. KeyData must be at most + 8192 characters. + format: byte + maxLength: 8192 + type: string + rekorKeyData: + description: rekorKeyData contains inline base64-encoded + data for the PEM format from the Rekor public key. rekorKeyData + must be at most 8192 characters. + format: byte + maxLength: 8192 + type: string + required: + - keyData + type: object + required: + - policyType + type: object + x-kubernetes-validations: + - message: publicKey is required when policyType is PublicKey, + and forbidden otherwise + rule: 'has(self.policyType) && self.policyType == ''PublicKey'' + ? has(self.publicKey) : !has(self.publicKey)' + - message: fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, + and forbidden otherwise + rule: 'has(self.policyType) && self.policyType == ''FulcioCAWithRekor'' + ? has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)' + signedIdentity: + description: signedIdentity specifies what image identity the + signature claims about the image. The required matchPolicy field + specifies the approach used in the verification process to verify + the identity in the signature and the actual image identity; + the default matchPolicy is "MatchRepoDigestOrExact". + properties: + exactRepository: + description: exactRepository is required if matchPolicy is + set to "ExactRepository". + properties: + repository: + description: repository is the reference of the image + identity to be matched. The value should be a repository + name (by omitting the tag or digest) in a registry implementing + the "Docker Registry HTTP API V2". For example, docker.io/library/busybox + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? + self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + required: + - repository + type: object + matchPolicy: + description: matchPolicy sets the type of matching to be used. + Valid values are "MatchRepoDigestOrExact", "MatchRepository", + "ExactRepository", "RemapIdentity". When omitted, the default + value is "MatchRepoDigestOrExact". If matchPolicy is set to + ExactRepository, then exactRepository must be specified. + If matchPolicy is set to RemapIdentity, then remapIdentity + must be specified. "MatchRepoDigestOrExact" means that the + identity in the signature must be in the same repository + as the image identity if the image identity is referenced + by a digest. Otherwise, the identity in the signature must + be the same as the image identity. "MatchRepository" means + that the identity in the signature must be in the same repository + as the image identity. "ExactRepository" means that the + identity in the signature must be in the same repository + as a specific identity specified by "repository". "RemapIdentity" + means that the identity in the signature must be the same as the remapped + image identity. Remapped image identity is obtained by replacing + the "prefix" with the specified “signedPrefix” if the + image identity matches the specified prefix. + enum: + - MatchRepoDigestOrExact + - MatchRepository + - ExactRepository + - RemapIdentity + type: string + remapIdentity: + description: remapIdentity is required if matchPolicy is set + to "RemapIdentity". + properties: + prefix: + description: prefix is the prefix of the image identity + to be matched. If the image identity matches the specified + prefix, that prefix is replaced by the specified “signedPrefix” + (otherwise it is used unchanged and no remapping + takes place). This is useful when verifying signatures + for a mirror of some other repository namespace that + preserves the vendor’s repository structure. The prefix + and signedPrefix values can be either host[:port] values + (matching exactly the same host[:port], string), repository + namespaces, or repositories (i.e. they must not contain + tags/digests), and match as prefixes of the fully expanded + form.
For example, docker.io/library/busybox (not busybox) + to specify that single repository, or docker.io/library + (not an empty string) to specify the parent namespace + of docker.io/library/busybox. + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? + self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + signedPrefix: + description: signedPrefix is the prefix of the image identity + to be matched in the signature. The format is the same + as "prefix". The values can be either host[:port] values + (matching exactly the same host[:port], string), repository + namespaces, or repositories (i.e. they must not contain + tags/digests), and match as prefixes of the fully expanded + form. For example, docker.io/library/busybox (not busybox) + to specify that single repository, or docker.io/library + (not an empty string) to specify the parent namespace + of docker.io/library/busybox. + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid repository or prefix in the signedIdentity, + should not include the tag or digest + rule: 'self.matches(''.*:([\\w][\\w.-]{0,127})$'')? + self.matches(''^(localhost:[0-9]+)$''): true' + - message: invalid repository or prefix in the signedIdentity + rule: self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$') + required: + - prefix + - signedPrefix + type: object + required: + - matchPolicy + type: object + x-kubernetes-validations: + - message: exactRepository is required when matchPolicy is ExactRepository, + and forbidden otherwise + rule: '(has(self.matchPolicy) && self.matchPolicy == ''ExactRepository'') + ? has(self.exactRepository) : !has(self.exactRepository)' + - message: remapIdentity is required when matchPolicy is RemapIdentity, + and forbidden otherwise + rule: '(has(self.matchPolicy) && self.matchPolicy == ''RemapIdentity'') + ? has(self.remapIdentity) : !has(self.remapIdentity)' + required: + - rootOfTrust + type: object + scopes: + description: 'scopes defines the list of image identities assigned + to a policy. Each item refers to a scope in a registry implementing + the "Docker Registry HTTP API V2". Scopes matching individual images + are named Docker references in the fully expanded form, either using + a tag or digest. For example, docker.io/library/busybox:latest (not + busybox:latest). More general scopes are prefixes of individual-image + scopes, and specify a repository (by omitting the tag or digest), + a repository namespace, or a registry host (by only specifying the + host name and possibly a port number) or a wildcard expression starting + with `*.`, for matching all subdomains (not including a port number). + Wildcards are only supported for subdomain matching, and may not + be used in the middle of the host, i.e. *.example.com is a valid + case, but example*.*.com is not. Please be aware that the scopes + should not be nested under the repositories of OpenShift Container + Platform images. 
If configured, the policies for OpenShift Container + Platform repositories will not be in effect. For additional details + about the format, please refer to the document explaining the docker + transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker' + items: + maxLength: 512 + type: string + x-kubernetes-validations: + - message: invalid image scope format, scope must contain a fully + qualified domain name or 'localhost' + rule: 'size(self.split(''/'')[0].split(''.'')) == 1 ? self.split(''/'')[0].split(''.'')[0].split('':'')[0] + == ''localhost'' : true' + - message: invalid image scope with wildcard, a wildcard can only + be at the start of the domain and is only supported for subdomain + matching, not path matching + rule: 'self.contains(''*'') ? self.matches(''^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$'') + : true' + - message: invalid repository namespace or image specification in + the image scope + rule: '!self.contains(''*'') ? self.matches(''^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$'') + : true' + maxItems: 256 + type: array + x-kubernetes-list-type: set + required: + - policy + - scopes + type: object + status: + description: status contains the observed state of the resource. + properties: + conditions: + description: conditions provide details on the status of this API + Resource. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. 
This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..3d8946b93c --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.crd-manifests/0000_10_config-operator_01_insightsdatagathers-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,88 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1245 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: insightsdatagathers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: InsightsDataGather + listKind: InsightsDataGatherList + plural: insightsdatagathers + singular: insightsdatagather + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "InsightsDataGather provides data gather configuration options + for the Insights Operator. \n Compatibility level 4: No compatibility + is provided, the API can change at any point for any reason. These capabilities + should not be used by applications needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + gatherConfig: + description: gatherConfig spec attribute includes all the configuration + options related to gathering of the Insights data and its uploading + to the ingress. + properties: + dataPolicy: + description: dataPolicy allows the user to enable additional global + obfuscation of the IP addresses and base domain in the Insights + archive data. Valid values are "None" and "ObfuscateNetworking". + When set to None the data is not obfuscated. When set to ObfuscateNetworking + the IP addresses and the cluster domain name are obfuscated. + When omitted, this means no opinion and the platform is left + to choose a reasonable default, which is subject to change over + time. The current default is None. + enum: + - "" + - None + - ObfuscateNetworking + type: string + disabledGatherers: + description: 'disabledGatherers is a list of gatherers to be excluded + from the gathering. All the gatherers can be disabled by providing + the "all" value. If all the gatherers are disabled, the Insights + operator does not gather any data. The particular gatherer + IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + Run the following command to get the names of the last active gatherers: + "oc get insightsoperators.operator.openshift.io cluster -o json + | jq ''.status.gatherStatus.gatherers[].name''" An example of + disabling gatherers looks like this: `disabledGatherers: ["clusterconfig/machine_configs", + "workloads/workload_info"]`' + items: + type: string + type: array + type: object + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden.
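Read against the gatherConfig schema just described, a minimal InsightsDataGather object might look as follows. This is a sketch: the object name is an assumption, and the gatherer IDs are the ones quoted in the disabledGatherers description above:

# --- illustrative example, not part of the vendored manifest ---
apiVersion: config.openshift.io/v1alpha1
kind: InsightsDataGather
metadata:
  name: cluster                      # assumed singleton name
spec:
  gatherConfig:
    # Obfuscate IP addresses and the cluster base domain in the archive.
    dataPolicy: ObfuscateNetworking
    # Gatherer IDs per insights-operator/docs/gathered-data.md.
    disabledGatherers:
      - clusterconfig/machine_configs
      - workloads/workload_info
# --- end example ---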
+ type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index 8f6c752462..6d3bc87896 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -1,60 +1,63 @@ -| FeatureGate | Default on Hypershift | Default on SelfManagedHA | TechPreviewNoUpgrade on Hypershift | TechPreviewNoUpgrade on SelfManagedHA | -| ------ | --- | --- | --- | --- | -| AutomatedEtcdBackup| | | Enabled | Enabled | -| CSIDriverSharedResource| | | Enabled | Enabled | -| DNSNameResolver| | | Enabled | Enabled | -| DynamicResourceAllocation| | | Enabled | Enabled | -| Example| | | Enabled | Enabled | -| ExternalRouteCertificate| | | Enabled | Enabled | -| GCPClusterHostedDNS| | | Enabled | Enabled | -| GCPLabelsTags| | | Enabled | Enabled | -| GatewayAPI| | | Enabled | Enabled | -| HardwareSpeed| | | Enabled | Enabled | -| ImagePolicy| | | Enabled | Enabled | -| InsightsConfig| | | Enabled | Enabled | -| InsightsConfigAPI| | | Enabled | Enabled | -| InsightsOnDemandDataGather| | | Enabled | Enabled | -| InstallAlternateInfrastructureAWS| | | Enabled | Enabled | -| MachineAPIProviderOpenStack| | | Enabled | Enabled | -| MachineConfigNodes| | | Enabled | Enabled | -| ManagedBootImages| | | Enabled | Enabled | -| MaxUnavailableStatefulSet| | | Enabled | Enabled | -| MetricsCollectionProfiles| | | Enabled | Enabled | -| MetricsServer| | | Enabled | Enabled | -| MixedCPUsAllocation| | | Enabled | Enabled | -| NetworkDiagnosticsConfig| | | Enabled | Enabled | -| NewOLM| | | Enabled | Enabled | -| NodeDisruptionPolicy| | | Enabled | Enabled | -| NodeSwap| | | Enabled | Enabled | -| OnClusterBuild| | | Enabled | Enabled | -| PinnedImages| | | Enabled | Enabled | -| PlatformOperators| | | Enabled | Enabled | -| RouteExternalCertificate| | | Enabled | Enabled | -| ServiceAccountTokenNodeBinding| | | Enabled | Enabled | -| ServiceAccountTokenNodeBindingValidation| | | Enabled | Enabled | -| ServiceAccountTokenPodNodeInfo| | | Enabled | Enabled | -| SignatureStores| | | Enabled | Enabled | -| SigstoreImageVerification| | | Enabled | Enabled | -| TranslateStreamCloseWebsocketRequests| | | Enabled | Enabled | -| UpgradeStatus| | | Enabled | Enabled | -| VSphereDriverConfiguration| | | Enabled | Enabled | -| ValidatingAdmissionPolicy| | | Enabled | Enabled | -| VolumeGroupSnapshot| | | Enabled | Enabled | -| ExternalOIDC| Enabled | | Enabled | Enabled | -| AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | -| AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | -| AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | -| BareMetalLoadBalancer| Enabled | Enabled | Enabled | Enabled | -| BuildCSIVolumes| Enabled | Enabled | Enabled | Enabled | -| CloudDualStackNodeIPs| Enabled | Enabled | Enabled | Enabled | -| DisableKubeletCloudCredentialProviders| Enabled | Enabled | Enabled | Enabled | -| ExternalCloudProvider| Enabled | Enabled | Enabled | Enabled | -| ExternalCloudProviderAzure| Enabled | Enabled | Enabled | Enabled | -| ExternalCloudProviderExternal| Enabled | Enabled | Enabled | Enabled | -| ExternalCloudProviderGCP| Enabled | Enabled | Enabled | Enabled | -| KMSv1| Enabled | Enabled | Enabled | Enabled | -| NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | -| OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | -| PrivateHostedZoneAWS| Enabled | Enabled | Enabled | Enabled | 
-| VSphereControlPlaneMachineSet| Enabled | Enabled | Enabled | Enabled | -| VSphereStaticIPs| Enabled | Enabled | Enabled | Enabled | +| FeatureGate | Default on Hypershift | Default on SelfManagedHA | DevPreviewNoUpgrade on Hypershift | DevPreviewNoUpgrade on SelfManagedHA | TechPreviewNoUpgrade on Hypershift | TechPreviewNoUpgrade on SelfManagedHA | +| ------ | --- | --- | --- | --- | --- | --- | +| ClusterAPIInstall| | | | | | | +| EventedPLEG| | | | | | | +| MachineAPIOperatorDisableMachineHealthCheckController| | | | | | | +| AutomatedEtcdBackup| | | Enabled | Enabled | Enabled | Enabled | +| CSIDriverSharedResource| | | Enabled | Enabled | Enabled | Enabled | +| DNSNameResolver| | | Enabled | Enabled | Enabled | Enabled | +| DynamicResourceAllocation| | | Enabled | Enabled | Enabled | Enabled | +| Example| | | Enabled | Enabled | Enabled | Enabled | +| ExternalRouteCertificate| | | Enabled | Enabled | Enabled | Enabled | +| GCPClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | +| GCPLabelsTags| | | Enabled | Enabled | Enabled | Enabled | +| GatewayAPI| | | Enabled | Enabled | Enabled | Enabled | +| HardwareSpeed| | | Enabled | Enabled | Enabled | Enabled | +| ImagePolicy| | | Enabled | Enabled | Enabled | Enabled | +| InsightsConfig| | | Enabled | Enabled | Enabled | Enabled | +| InsightsConfigAPI| | | Enabled | Enabled | Enabled | Enabled | +| InsightsOnDemandDataGather| | | Enabled | Enabled | Enabled | Enabled | +| InstallAlternateInfrastructureAWS| | | Enabled | Enabled | Enabled | Enabled | +| MachineAPIProviderOpenStack| | | Enabled | Enabled | Enabled | Enabled | +| MachineConfigNodes| | | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImages| | | Enabled | Enabled | Enabled | Enabled | +| MaxUnavailableStatefulSet| | | Enabled | Enabled | Enabled | Enabled | +| MetricsCollectionProfiles| | | Enabled | Enabled | Enabled | Enabled | +| MixedCPUsAllocation| | | Enabled | Enabled | Enabled | Enabled | +| NetworkDiagnosticsConfig| | | Enabled | Enabled | Enabled | Enabled | +| NewOLM| | | Enabled | Enabled | Enabled | Enabled | +| NodeDisruptionPolicy| | | Enabled | Enabled | Enabled | Enabled | +| NodeSwap| | | Enabled | Enabled | Enabled | Enabled | +| OnClusterBuild| | | Enabled | Enabled | Enabled | Enabled | +| PinnedImages| | | Enabled | Enabled | Enabled | Enabled | +| PlatformOperators| | | Enabled | Enabled | Enabled | Enabled | +| RouteExternalCertificate| | | Enabled | Enabled | Enabled | Enabled | +| ServiceAccountTokenNodeBinding| | | Enabled | Enabled | Enabled | Enabled | +| ServiceAccountTokenNodeBindingValidation| | | Enabled | Enabled | Enabled | Enabled | +| ServiceAccountTokenPodNodeInfo| | | Enabled | Enabled | Enabled | Enabled | +| SignatureStores| | | Enabled | Enabled | Enabled | Enabled | +| SigstoreImageVerification| | | Enabled | Enabled | Enabled | Enabled | +| TranslateStreamCloseWebsocketRequests| | | Enabled | Enabled | Enabled | Enabled | +| UpgradeStatus| | | Enabled | Enabled | Enabled | Enabled | +| VSphereDriverConfiguration| | | Enabled | Enabled | Enabled | Enabled | +| ValidatingAdmissionPolicy| | | Enabled | Enabled | Enabled | Enabled | +| VolumeGroupSnapshot| | | Enabled | Enabled | Enabled | Enabled | +| ExternalOIDC| Enabled | | Enabled | Enabled | Enabled | Enabled | +| AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| 
BareMetalLoadBalancer| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| BuildCSIVolumes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| CloudDualStackNodeIPs| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| DisableKubeletCloudCredentialProviders| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ExternalCloudProvider| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ExternalCloudProviderAzure| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ExternalCloudProviderExternal| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| ExternalCloudProviderGCP| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| KMSv1| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| MetricsServer| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| PrivateHostedZoneAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VSphereControlPlaneMachineSet| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | +| VSphereStaticIPs| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled | diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go index a67f9d59d9..574c90035a 100644 --- a/vendor/github.com/openshift/api/machineconfiguration/v1/types.go +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/types.go @@ -509,7 +509,7 @@ type MachineConfigPoolStatus struct { // +listType=map // +listMapKey=poolSynchronizerType // +optional - PoolSynchronizersStatus []PoolSynchronizerStatus `json:"poolSynchronizersStatus"` + PoolSynchronizersStatus []PoolSynchronizerStatus `json:"poolSynchronizersStatus,omitempty"` } // +kubebuilder:validation:XValidation:rule="self.machineCount >= self.updatedMachineCount", message="machineCount must be greater than or equal to updatedMachineCount" diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..a62c75eb2e --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_controllerconfigs-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,2759 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: controllerconfigs.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: ControllerConfig + listKind: ControllerConfigList + plural: controllerconfigs + singular: controllerconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ControllerConfig describes configuration for MachineConfigController. 
This is currently only used + to drive the MachineConfig objects generated + by the TemplateController. \n Compatibility level 1: Stable within a major + release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ControllerConfigSpec is the spec for the ControllerConfig resource. + properties: + additionalTrustBundle: + description: additionalTrustBundle is a certificate bundle that will + be added to the nodes' trusted certificate store. + format: byte + nullable: true + type: string + baseOSContainerImage: + description: BaseOSContainerImage is the new-format container image + for operating system updates. + type: string + baseOSExtensionsContainerImage: + description: BaseOSExtensionsContainerImage is the matching extensions + container for the new-format container image + type: string + cloudProviderCAData: + description: cloudProviderCAData specifies the cloud provider CA data + format: byte + nullable: true + type: string + cloudProviderConfig: + description: cloudProviderConfig is the configuration for the given + cloud provider + type: string + clusterDNSIP: + description: clusterDNSIP is the cluster DNS IP address + type: string + dns: + description: dns holds the cluster dns details + nullable: true + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'metadata is the standard object''s metadata. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: spec holds user settable values for configuration + properties: + baseDomain: + description: "baseDomain is the base domain of the cluster. + All managed DNS records will be sub-domains of this base. + \n For example, given the base domain `openshift.example.com`, + an API server DNS record may be created for `cluster-api.openshift.example.com`. + \n Once set, this field cannot be changed." + type: string + platform: + description: platform holds configuration specific to the + underlying infrastructure provider for DNS. When omitted, + this means the user has no opinion and the platform is left + to choose reasonable defaults.
These defaults are subject + to change over time. + properties: + aws: + description: aws contains DNS configuration specific to + the Amazon Web Services cloud provider. + properties: + privateZoneIAMRole: + description: privateZoneIAMRole contains the ARN of + an IAM role that should be assumed when performing + operations on the cluster's private hosted zone + specified in the cluster DNS config. When left empty, + no role should be assumed. + pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ + type: string + type: object + type: + description: "type is the underlying infrastructure provider + for the cluster. Allowed values: \"\", \"AWS\". \n Individual + components may not support all platforms, and must handle + unrecognized platforms with best-effort defaults." + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + x-kubernetes-validations: + - message: allowed values are '' and 'AWS' + rule: self in ['','AWS'] + required: + - type + type: object + x-kubernetes-validations: + - message: aws configuration is required when platform is + AWS, and forbidden otherwise + rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) + : !has(self.aws)' + privateZone: + description: "privateZone is the location where all the DNS + records that are only available internally to the cluster + exist. \n If this field is nil, no private records should + be created. \n Once set, this field cannot be changed." + properties: + id: + description: "id is the identifier that can be used to + find the DNS hosted zone. \n on AWS zone can be fetched + using `ID` as id in [1] on Azure zone can be fetched + using `ID` as a pre-determined name in [2], on GCP zone + can be fetched using `ID` as a pre-determined name in + [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + additionalProperties: + type: string + description: "tags can be used to query the DNS hosted + zone. \n on AWS, resourcegroupstaggingapi [1] can be + used to fetch a zone using `Tags` as tag-filters, \n + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + type: object + publicZone: + description: "publicZone is the location where all the DNS + records that are publicly accessible to the internet exist. + \n If this field is nil, no public records should be created. + \n Once set, this field cannot be changed." + properties: + id: + description: "id is the identifier that can be used to + find the DNS hosted zone. \n on AWS zone can be fetched + using `ID` as id in [1] on Azure zone can be fetched + using `ID` as a pre-determined name in [2], on GCP zone + can be fetched using `ID` as a pre-determined name in + [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + additionalProperties: + type: string + description: "tags can be used to query the DNS hosted + zone. 
\n on AWS, resourcegroupstaggingapi [1] can be + used to fetch a zone using `Tags` as tag-filters, \n + [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + type: object + type: object + status: + description: status holds observed values from the cluster. They + may not be overridden. + type: object + required: + - spec + type: object + x-kubernetes-embedded-resource: true + etcdDiscoveryDomain: + description: etcdDiscoveryDomain is deprecated, use Infra.Status.EtcdDiscoveryDomain + instead + type: string + imageRegistryBundleData: + description: imageRegistryBundleData is the ImageRegistryData + items: + description: ImageRegistryBundle contains information for writing + image registry certificates + properties: + data: + description: data holds the contents of the bundle that will + be written to the file location + format: byte + type: string + file: + description: file holds the name of the file where the bundle + will be written to disk + type: string + required: + - data + - file + type: object + type: array + x-kubernetes-list-type: atomic + imageRegistryBundleUserData: + description: imageRegistryBundleUserData is Image Registry Data provided + by the user + items: + description: ImageRegistryBundle contains information for writing + image registry certificates + properties: + data: + description: data holds the contents of the bundle that will + be written to the file location + format: byte + type: string + file: + description: file holds the name of the file where the bundle + will be written to disk + type: string + required: + - data + - file + type: object + type: array + x-kubernetes-list-type: atomic + images: + additionalProperties: + type: string + description: images is a map of images that are used by the controller + to render templates under ./templates/ + type: object + infra: + description: infra holds the infrastructure details + nullable: true + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this + representation of an object. Servers should convert recognized + schemas to the latest internal value, and may reject unrecognized + values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint + the client submits requests to. Cannot be updated. In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'metadata is the standard object''s metadata. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + spec: + description: spec holds user settable values for configuration + properties: + cloudConfig: + description: "cloudConfig is a reference to a ConfigMap containing + the cloud provider configuration file. This configuration + file is used to configure the Kubernetes cloud provider + integration when using the built-in cloud provider integration + or the external cloud controller manager. The namespace + for this config map is openshift-config. \n cloudConfig + should only be consumed by the kube_cloud_config controller. + The controller is responsible for using the user configuration + in the spec for various platforms and combining that with + the user-provided ConfigMap in this field to create a stitched + kube cloud config. The controller generates a ConfigMap + `kube-cloud-config` in the `openshift-config-managed` namespace + with the kube cloud config stored in the `cloud.conf` key. + All the clients are expected to use the generated ConfigMap + only." + properties: + key: + description: Key allows pointing to a specific key/value + inside of the configmap. This is useful for logical + file references. + type: string + name: + type: string + type: object + platformSpec: + description: platformSpec holds desired information specific + to the underlying infrastructure provider. + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to + the Alibaba Cloud infrastructure provider. + type: object + aws: + description: AWS contains settings specific to the Amazon + Web Services infrastructure provider. + properties: + serviceEndpoints: + description: serviceEndpoints list contains custom + endpoints which will override the default service endpoint + of AWS Services. There must be only one ServiceEndpoint + for a service. + items: + description: AWSServiceEndpoint stores the configuration + of a custom URL to override existing defaults + of AWS Services. + properties: + name: + description: name is the name of the AWS service. + The list of all the service names can be found + at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html + This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is a fully qualified URI with + scheme https that overrides the default generated + endpoint for a client. This must be provided + and cannot be empty. + pattern: ^https:// + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + azure: + description: Azure contains settings specific to the Azure + infrastructure provider. + type: object + baremetal: + description: BareMetal contains settings specific to the + BareMetal platform. + properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IP addresses, + one from IPv4 family and one from IPv6. In single + stack clusters a single IP address is expected. + When omitted, values from the status.apiServerInternalIPs + will be used. Once set, the list cannot be completely + removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must contain at most + one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() + : true' + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller.
The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IP addresses, one + from IPv4 family and one from IPv6. In single stack + clusters a single IP address is expected. When omitted, + values from the status.ingressIPs will be used. + Once set, the list cannot be completely removed + (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 + address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() + : true' + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. Each + network is provided in the CIDR format and should + be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once + set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + equinixMetal: + description: EquinixMetal contains settings specific to + the Equinix Metal infrastructure provider. + type: object + external: + description: ExternalPlatformType represents generic infrastructure + provider. Platform-specific components should be supplemented + separately. + properties: + platformName: + default: Unknown + description: PlatformName holds the arbitrary string + representing the infrastructure provider name, expected + to be set at the installation time. This field is + solely for informational and reporting purposes + and is not expected to be used for decision-making. + type: string + x-kubernetes-validations: + - message: platform name cannot be changed once set + rule: oldSelf == 'Unknown' || self == oldSelf + type: object + gcp: + description: GCP contains settings specific to the Google + Cloud Platform infrastructure provider. + type: object + ibmcloud: + description: IBMCloud contains settings specific to the + IBMCloud infrastructure provider. + type: object + kubevirt: + description: Kubevirt contains settings specific to the + kubevirt infrastructure provider. + type: object + nutanix: + description: Nutanix contains settings specific to the + Nutanix infrastructure provider. + properties: + failureDomains: + description: failureDomains configures failure domains + information for the Nutanix platform. When set, + the failure domains defined here may be used to + spread Machines across prism element clusters to + improve fault tolerance of the cluster. + items: + description: NutanixFailureDomain configures failure + domain information for the Nutanix platform. 
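To make the dual-stack validation rules of the baremetal platformSpec above concrete, a snippet using documentation address ranges might look like this (all values illustrative):

```yaml
# Satisfies the CEL rules above: each list is a set holding at most one IPv4
# and at most one IPv6 address; machineNetworks holds the node CIDRs.
baremetal:
  apiServerInternalIPs:
    - 192.0.2.10
    - 2001:db8::10
  ingressIPs:
    - 192.0.2.11
    - 2001:db8::11
  machineNetworks:
    - 192.0.2.0/24
    - 2001:db8::/64
```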
+ properties: + cluster: + description: cluster is to identify the cluster + (the Prism Element under management of the + Prism Central), in which the Machine's VM + will be created. The cluster identifier (uuid + or name) can be obtained from the Prism Central + console or using the prism_central API. + properties: + name: + description: name is the resource name in + the PC. It cannot be empty if the type + is Name. + type: string + type: + description: type is the identifier type + to use for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource + in the PC. It cannot be empty if the type + is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required when + type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' + ? has(self.uuid) : !has(self.uuid)' + - message: name configuration is required when + type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' + ? has(self.name) : !has(self.name)' + name: + description: name defines the unique name of + a failure domain. Name is required and must + be at most 64 characters in length. It must + consist of only lower case alphanumeric characters + and hyphens (-). It must start and end with + an alphanumeric character. This value is arbitrary + and is used to identify the failure domain + within the platform. + maxLength: 64 + minLength: 1 + pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?' + type: string + subnets: + description: subnets holds a list of identifiers + (one or more) of the cluster's network subnets + for the Machine's VM to connect to. The subnet + identifiers (uuid or name) can be obtained + from the Prism Central console or using the + prism_central API. + items: + description: NutanixResourceIdentifier holds + the identity of a Nutanix PC resource (cluster, + image, subnet, etc.) + properties: + name: + description: name is the resource name + in the PC. It cannot be empty if the + type is Name. + type: string + type: + description: type is the identifier type + to use for this resource. + enum: + - UUID + - Name + type: string + uuid: + description: uuid is the UUID of the resource + in the PC. It cannot be empty if the + type is UUID. + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: uuid configuration is required + when type is UUID, and forbidden otherwise + rule: 'has(self.type) && self.type == ''UUID'' + ? has(self.uuid) : !has(self.uuid)' + - message: name configuration is required + when type is Name, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Name'' + ? has(self.name) : !has(self.name)' + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + required: + - cluster + - name + - subnets + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + prismCentral: + description: prismCentral holds the endpoint address + and port to access the Nutanix Prism Central. When + a cluster-wide proxy is installed, by default, this + endpoint will be accessed via the proxy. Should + you wish for communication with this endpoint not + to be proxied, please add the endpoint to the proxy + spec.noProxy list. 
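A sketch of a single Nutanix failure domain under the schema above; the names and UUID are hypothetical. Note how the CEL rules pair `type: Name` with `name` and `type: UUID` with `uuid`:

```yaml
# Hypothetical failure domain: the cluster is identified by name, the single
# required subnet by UUID (subnets allows exactly one entry).
failureDomains:
  - name: fd-pe1
    cluster:
      type: Name
      name: pe-cluster-1
    subnets:
      - type: UUID
        uuid: 0005a0f1-8f43-a0f2-02b7-3cecef193712
```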
+ properties: + address: + description: address is the endpoint address (DNS + name or IP address) of the Nutanix Prism Central + or Element (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to access + the Nutanix Prism Central or Element (cluster) + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + prismElements: + description: prismElements holds one or more endpoint + address and port data to access the Nutanix Prism + Elements (clusters) of the Nutanix Prism Central. + Currently we only support one Prism Element (cluster) + for an OpenShift cluster, where all the Nutanix + resources (VMs, subnets, volumes, etc.) used in + the OpenShift cluster are located. In the future, + we may support Nutanix resources (VMs, etc.) spread + over multiple Prism Elements (clusters) of the Prism + Central. + items: + description: NutanixPrismElementEndpoint holds the + name and endpoint data for a Prism Element (cluster) + properties: + endpoint: + description: endpoint holds the endpoint address + and port data of the Prism Element (cluster). + When a cluster-wide proxy is installed, by + default, this endpoint will be accessed via + the proxy. Should you wish for communication + with this endpoint not to be proxied, please + add the endpoint to the proxy spec.noProxy + list. + properties: + address: + description: address is the endpoint address + (DNS name or IP address) of the Nutanix + Prism Central or Element (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to + access the Nutanix Prism Central or Element + (cluster) + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + name: + description: name is the name of the Prism Element + (cluster). This value will correspond with + the cluster field configured on other resources + (eg Machines, PVCs, etc). + maxLength: 256 + type: string + required: + - endpoint + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - prismCentral + - prismElements + type: object + openstack: + description: OpenStack contains settings specific to the + OpenStack infrastructure provider. + properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IP addresses, + one from IPv4 family and one from IPv6. In single + stack clusters a single IP address is expected. + When omitted, values from the status.apiServerInternalIPs + will be used. Once set, the list cannot be completely + removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must contain at most + one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? 
ip(self[0]).family() != ip(self[1]).family() + : true' + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IP addresses, one + from IPv4 family and one from IPv6. In single stack + clusters a single IP address is expected. When omitted, + values from the status.ingressIPs will be used. + Once set, the list cannot be completely removed + (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 + address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() + : true' + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. Each + network is provided in the CIDR format and should + be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". + items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once + set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + ovirt: + description: Ovirt contains settings specific to the oVirt + infrastructure provider. + type: object + powervs: + description: PowerVS contains settings specific to the + IBM Power Systems Virtual Servers infrastructure provider. + properties: + serviceEndpoints: + description: serviceEndpoints is a list of custom + endpoints which will override the default service + endpoints of a Power VS service. + items: + description: PowervsServiceEndpoint stores the configuration + of a custom url to override existing defaults + of PowerVS Services. + properties: + name: + description: name is the name of the Power VS + service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api + ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller + Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with + scheme https, that overrides the default generated + endpoint for a client. This must be provided + and cannot be empty. + format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: type is the underlying infrastructure provider + for the cluster. This value controls whether infrastructure + automation such as service load balancers, dynamic volume + provisioning, machine creation and deletion, and other + integrations are enabled. If None, no infrastructure + automation is enabled. 
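For the Power VS serviceEndpoints schema above, an override might be sketched as follows (the URL is a placeholder; the name must be lowercase per the pattern):

```yaml
# Hypothetical Power VS endpoint override for the IAM service.
powervs:
  serviceEndpoints:
    - name: iam
      url: https://private.iam.example.cloud.ibm.com
```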
Allowed values are "AWS", "Azure", + "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", + "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", + "Nutanix" and "None". Individual components may not + support all platforms, and must handle unrecognized + platforms as None if they do not support that platform. + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + vsphere: + description: VSphere contains settings specific to the + VSphere infrastructure provider. + properties: + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IP addresses, + one from IPv4 family and one from IPv6. In single + stack clusters a single IP address is expected. + When omitted, values from the status.apiServerInternalIPs + will be used. Once set, the list cannot be completely + removed (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: apiServerInternalIPs must contain at most + one IPv4 address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() + : true' + failureDomains: + description: failureDomains contains the definition + of region, zone and the vCenter topology. If this + is omitted failure domains (regions and zones) will + not be used. + items: + description: VSpherePlatformFailureDomainSpec holds + the region and zone failure domain and the vCenter + topology of that failure domain. + properties: + name: + description: name defines the arbitrary but + unique name of a failure domain. + maxLength: 256 + minLength: 1 + type: string + region: + description: region defines the name of a region + tag that will be attached to a vCenter datacenter. + The tag category in vCenter must be named + openshift-region. + maxLength: 80 + minLength: 1 + type: string + server: + description: server is the fully-qualified domain + name or the IP address of the vCenter server. + --- + maxLength: 255 + minLength: 1 + type: string + topology: + description: Topology describes a given failure + domain using vSphere constructs + properties: + computeCluster: + description: computeCluster the absolute + path of the vCenter cluster in which virtual + machine will be located. The absolute + path is of the form //host/. + The maximum length of the path is 2048 + characters. + maxLength: 2048 + pattern: ^/.*?/host/.*? + type: string + datacenter: + description: datacenter is the name of vCenter + datacenter in which virtual machines will + be located. The maximum length of the + datacenter name is 80 characters. + maxLength: 80 + type: string + datastore: + description: datastore is the absolute path + of the datastore in which the virtual + machine is located. 
The absolute path + is of the form //datastore/ + The maximum length of the path is 2048 + characters. + maxLength: 2048 + pattern: ^/.*?/datastore/.*? + type: string + folder: + description: folder is the absolute path + of the folder where virtual machines are + located. The absolute path is of the form + //vm/. The maximum + length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/vm/.*? + type: string + networks: + description: networks is the list of port + group network names within this failure + domain. Currently, we only support a single + interface per RHCOS virtual machine. The + available networks (port groups) can be + listed using `govc ls 'network/*'` The + single interface should be the absolute + path of the form //network/. + items: + type: string + maxItems: 1 + minItems: 1 + type: array + x-kubernetes-list-type: atomic + resourcePool: + description: resourcePool is the absolute + path of the resource pool where virtual + machines will be created. The absolute + path is of the form //host//Resources/. + The maximum length of the path is 2048 + characters. + maxLength: 2048 + pattern: ^/.*?/host/.*?/Resources.* + type: string + template: + description: "template is the full inventory + path of the virtual machine or template + that will be cloned when creating new + machines in this failure domain. The maximum + length of the path is 2048 characters. + \n When omitted, the template will be + calculated by the control plane machineset + operator based on the region and zone + defined in VSpherePlatformFailureDomainSpec. + For example, for zone=zonea, region=region1, + and infrastructure name=test, the template + path would be calculated as //vm/test-rhcos-region1-zonea." + maxLength: 2048 + minLength: 1 + pattern: ^/.*?/vm/.*? + type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zone: + description: zone defines the name of a zone + tag that will be attached to a vCenter cluster. + The tag category in vCenter must be named + openshift-zone. + maxLength: 80 + minLength: 1 + type: string + required: + - name + - region + - server + - topology + - zone + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IP addresses, one + from IPv4 family and one from IPv6. In single stack + clusters a single IP address is expected. When omitted, + values from the status.ingressIPs will be used. + Once set, the list cannot be completely removed + (but its second entry can). + items: + description: IP is an IP address (for example, "10.0.0.0" + or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 2 + type: array + x-kubernetes-list-type: set + x-kubernetes-validations: + - message: ingressIPs must contain at most one IPv4 + address and at most one IPv6 address + rule: 'size(self) == 2 && isIP(self[0]) && isIP(self[1]) + ? ip(self[0]).family() != ip(self[1]).family() + : true' + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. Each + network is provided in the CIDR format and should + be IPv4 or IPv6, for example "10.0.0.0/8" or "fd00::/8". 
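Putting the failure-domain fields above together, a minimal vSphere entry might look like this; the inventory paths follow the /<datacenter>/... forms and every value is a placeholder:

```yaml
# Hypothetical vSphere failure domain with the four required topology fields.
failureDomains:
  - name: us-east-1a
    region: us-east
    zone: us-east-1a
    server: vcenter.example.com
    topology:
      datacenter: dc1
      computeCluster: /dc1/host/cluster1
      datastore: /dc1/datastore/ds1
      networks:
        - /dc1/network/portgroup1
```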
+ items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeNetworking: + description: nodeNetworking contains the definition + of internal and external network constraints for + assigning the node's networking. If this field is + omitted, networking defaults to the legacy address + selection behavior which is to only support a single + address and return the first one found. + properties: + external: + description: external represents the network configuration + of the node that is externally routable. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's + VM for use in the status.addresses fields. + --- + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: network VirtualMachine's VM Network + names that will be used when searching + for status.addresses fields. Note that if + internal.networkSubnetCIDR and external.networkSubnetCIDR + are not set, then the vNIC associated to + this network must only have a single IP + address assigned to it. The available networks + (port groups) can be listed using `govc + ls 'network/*'`. + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address + on VirtualMachine's network interfaces included + in the fields' CIDRs that will be used in + respective status.addresses fields. --- + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + internal: + description: internal represents the network configuration + of the node that is routable only within the + cluster. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses + in subnet ranges will be excluded when selecting + the IP address from the VirtualMachine's + VM for use in the status.addresses fields. + --- + items: + type: string + type: array + x-kubernetes-list-type: atomic + network: + description: network VirtualMachine's VM Network + names that will be used when searching + for status.addresses fields. Note that if + internal.networkSubnetCIDR and external.networkSubnetCIDR + are not set, then the vNIC associated to + this network must only have a single IP + address assigned to it. The available networks + (port groups) can be listed using `govc + ls 'network/*'`. + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address + on VirtualMachine's network interfaces included + in the fields' CIDRs that will be used in + respective status.addresses fields. --- + items: + type: string + type: array + x-kubernetes-list-type: set + type: object + type: object + vcenters: + description: vcenters holds the connection details + for services to communicate with vCenter. Currently, + only a single vCenter is supported. --- + items: + description: VSpherePlatformVCenterSpec stores the + vCenter connection fields. This is used by the + vSphere CCM. + properties: + datacenters: + description: The vCenter Datacenters in which + the RHCOS vm guests are located. This field + will be used by the Cloud Controller Manager. + Each datacenter listed here should be used + within a topology.
+ items: + type: string + minItems: 1 + type: array + x-kubernetes-list-type: set + port: + description: port is the TCP port that will + be used to communicate to the vCenter endpoint. + When omitted, this means the user has no opinion + and it is up to the platform to choose a sensible + default, which is subject to change over time. + format: int32 + maximum: 32767 + minimum: 1 + type: integer + server: + description: server is the fully-qualified domain + name or the IP address of the vCenter server. + --- + maxLength: 255 + type: string + required: + - datacenters + - server + type: object + maxItems: 1 + minItems: 0 + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-validations: + - message: apiServerInternalIPs list is required once + set + rule: '!has(oldSelf.apiServerInternalIPs) || has(self.apiServerInternalIPs)' + - message: ingressIPs list is required once set + rule: '!has(oldSelf.ingressIPs) || has(self.ingressIPs)' + type: object + type: object + status: + description: status holds observed values from the cluster. They + may not be overridden. + properties: + apiServerInternalURI: + description: apiServerInternalURI is a valid URI with scheme + 'https', address and optionally a port (defaulting to 443). apiServerInternalURI + can be used by components like kubelets to contact the + Kubernetes API server using the infrastructure provider + rather than Kubernetes networking. + type: string + apiServerURL: + description: apiServerURL is a valid URI with scheme 'https', + address and optionally a port (defaulting to 443). apiServerURL + can be used by components like the web console to tell users + where to find the Kubernetes API. + type: string + controlPlaneTopology: + default: HighlyAvailable + description: controlPlaneTopology expresses the expectations + for operands that normally run on control nodes. The default + is 'HighlyAvailable', which represents the behavior operators + have in a "normal" cluster. The 'SingleReplica' mode will + be used in single-node deployments and the operators should + not configure the operand for highly-available operation. + The 'External' mode indicates that the control plane is + hosted externally to the cluster and that its components + are not visible within the cluster. + enum: + - HighlyAvailable + - SingleReplica + - External + type: string + cpuPartitioning: + default: None + description: cpuPartitioning expresses if CPU partitioning + is a currently enabled feature in the cluster. CPU Partitioning + means that this cluster can support partitioning workloads + to specific CPU Sets. Valid values are "None" and "AllNodes". + When omitted, the default value is "None". The default value + of "None" indicates that no nodes will be set up with CPU + partitioning. The "AllNodes" value indicates that all nodes + have been set up with CPU partitioning, and can then be further + configured via the PerformanceProfile API. + enum: + - None + - AllNodes + type: string + etcdDiscoveryDomain: + description: 'etcdDiscoveryDomain is the domain used to fetch + the SRV records for discovering etcd servers and clients. + For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery + deprecated: as of 4.7, this field is no longer set or honored. It + will be removed in a future release.' + type: string + infrastructureName: + description: infrastructureName uniquely identifies a cluster + with a human friendly name.
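A hypothetical nodeNetworking stanza matching the schema above (the CIDRs and the port-group name are placeholders):

```yaml
# Constrains external address selection to one subnet while excluding part of
# it, and pins internal addresses to a named port group.
nodeNetworking:
  external:
    networkSubnetCidr:
      - 192.0.2.0/24
    excludeNetworkSubnetCidr:
      - 192.0.2.128/25
  internal:
    network: internal-portgroup-1
```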
Once set it should not be changed. + Must be of max length 27 and must have only alphanumeric + or hyphen characters. + type: string + infrastructureTopology: + default: HighlyAvailable + description: 'infrastructureTopology expresses the expectations + for infrastructure services that do not run on control plane + nodes, usually indicated by a node selector for a `role` + value other than `master`. The default is ''HighlyAvailable'', + which represents the behavior operators have in a "normal" + cluster. The ''SingleReplica'' mode will be used in single-node + deployments and the operators should not configure the operand + for highly-available operation. NOTE: External topology mode + is not applicable for this field.' + enum: + - HighlyAvailable + - SingleReplica + type: string + platform: + description: "platform is the underlying infrastructure provider + for the cluster. \n Deprecated: Use platformStatus.type + instead." + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + platformStatus: + description: platformStatus holds status information specific + to the underlying infrastructure provider. + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to + the Alibaba Cloud infrastructure provider. + properties: + region: + description: region specifies the region for Alibaba + Cloud resources created for the cluster. + pattern: ^[0-9A-Za-z-]+$ + type: string + resourceGroupID: + description: resourceGroupID is the ID of the resource + group for the cluster. + pattern: ^(rg-[0-9A-Za-z]+)?$ + type: string + resourceTags: + description: resourceTags is a list of additional + tags to apply to Alibaba Cloud resources created + for the cluster. + items: + description: AlibabaCloudResourceTag is the set + of tags to apply to resources. + properties: + key: + description: key is the key of the tag. + maxLength: 128 + minLength: 1 + type: string + value: + description: value is the value of the tag. + maxLength: 128 + minLength: 1 + type: string + required: + - key + - value + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + required: + - region + type: object + aws: + description: AWS contains settings specific to the Amazon + Web Services infrastructure provider. + properties: + region: + description: region holds the default AWS region for + new AWS resources created by the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional + tags to apply to AWS resources created for the cluster. + See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html + for information on tagging AWS resources. AWS supports + a maximum of 50 tags per resource. OpenShift reserves + 25 tags for its use, leaving 25 tags available for + the user. + items: + description: AWSResourceTag is a tag to apply to + AWS resources created for the cluster. + properties: + key: + description: key is the key of the tag + maxLength: 128 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + value: + description: value is the value of the tag. + Some AWS services do not support empty values. + Since tags are added to resources in many + services, the length of the tag value must + meet the requirements of all services.
+ maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 25 + type: array + x-kubernetes-list-type: atomic + serviceEndpoints: + description: ServiceEndpoints list contains custom + endpoints which will override the default service endpoints + of AWS Services. There must be only one ServiceEndpoint + for a service. + items: + description: AWSServiceEndpoint stores the configuration + of a custom url to override existing defaults + of AWS Services. + properties: + name: + description: name is the name of the AWS service. + The list of all the service names can be found + at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html + This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with + scheme https, that overrides the default generated + endpoint for a client. This must be provided + and cannot be empty. + pattern: ^https:// + type: string + type: object + type: array + x-kubernetes-list-type: atomic + type: object + azure: + description: Azure contains settings specific to the Azure + infrastructure provider. + properties: + armEndpoint: + description: armEndpoint specifies a URL to use for + resource management in non-sovereign clouds such + as Azure Stack. + type: string + cloudName: + description: cloudName is the name of the Azure cloud + environment which can be used to configure the Azure + SDK with the appropriate Azure API endpoints. If + empty, the value is equal to `AzurePublicCloud`. + enum: + - "" + - AzurePublicCloud + - AzureUSGovernmentCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureStackCloud + type: string + networkResourceGroupName: + description: networkResourceGroupName is the Resource + Group for network resources like the Virtual Network + and Subnets used by the cluster. If empty, the value + is the same as ResourceGroupName. + type: string + resourceGroupName: + description: resourceGroupName is the Resource Group + for new Azure resources created for the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional + tags to apply to Azure resources created for the + cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags + for information on tagging Azure resources. Due + to limitations on Automation, Content Delivery Network, + and DNS Azure resources, a maximum of 15 tags may be + applied. OpenShift reserves 5 tags for internal + use, allowing 10 tags for user configuration. + items: + description: AzureResourceTag is a tag to apply + to Azure resources created for the cluster. + properties: + key: + description: key is the key part of the tag. + A tag key can have a maximum of 128 characters + and cannot be empty. Key must begin with a + letter, end with a letter, number or underscore, + and must contain only alphanumeric characters + and the following special characters `_ . + -`. + maxLength: 128 + minLength: 1 + pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + value: + description: 'value is the value part of the + tag. A tag value can have a maximum of 256 + characters and cannot be empty. Value must + contain only alphanumeric characters and the + following special characters `_ + , - . / + : ; < = > ? @`.'
+ maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: resourceTags are immutable and may only + be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, + x in self) + type: object + x-kubernetes-validations: + - message: resourceTags may only be configured during + installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) + || has(oldSelf.resourceTags) && has(self.resourceTags)' + baremetal: + description: BareMetal contains settings specific to the + BareMetal platform. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. \n Deprecated: Use + APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. \n Deprecated: Use IngressIPs + instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IPs otherwise only + one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer + used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer + used by the cluster on BareMetal platform which + can be a user-managed or openshift-managed load + balancer that is to be used for the OpenShift + API and Ingress endpoints. When set to OpenShiftManagedDefault + the static pods in charge of API and Ingress + traffic load-balancing defined in the machine + config operator will be deployed. When set to + UserManaged these static pods will not be deployed + and it is expected that the load balancer is + configured out of band by the deployer. When + omitted, this means no opinion and the platform + is left to choose a reasonable default. The + default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. 
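To illustrate the baremetal platformStatus fields above, including how the deprecated singular fields mirror the first entry of their plural replacements (all addresses are examples):

```yaml
# Hypothetical baremetal platformStatus; loadBalancer shows the default type.
baremetal:
  apiServerInternalIP: 192.0.2.10
  apiServerInternalIPs:
    - 192.0.2.10
    - 2001:db8::10
  ingressIP: 192.0.2.11
  ingressIPs:
    - 192.0.2.11
    - 2001:db8::11
  loadBalancer:
    type: OpenShiftManagedDefault
```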
+ items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. Unlike the one managed by + the DNS operator, `NodeDNSIP` provides name resolution + for the nodes themselves. There is no DNS-as-a-service + for BareMetal deployments. In order to minimize + necessary changes to the datacenter DNS, a DNS service + is hosted as a static pod to serve those hostnames + to the nodes in the cluster. + type: string + type: object + equinixMetal: + description: EquinixMetal contains settings specific to + the Equinix Metal infrastructure provider. + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. + type: string + type: object + external: + description: External contains settings specific to the + generic External infrastructure provider. + properties: + cloudControllerManager: + description: cloudControllerManager contains settings + specific to the external Cloud Controller Manager + (a.k.a. CCM or CPI). When omitted, new nodes will + not be tainted and no extra initialization from + the cloud controller manager is expected. + properties: + state: + description: "state determines whether or not + an external Cloud Controller Manager is expected + to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager + \n Valid values are \"External\", \"None\" and + omitted. When set to \"External\", new nodes + will be tainted as uninitialized when created, + preventing them from running workloads until + they are initialized by the cloud controller + manager. When omitted or set to \"None\", new + nodes will not be tainted and no extra initialization + from the cloud controller manager is expected." + enum: + - "" + - External + - None + type: string + x-kubernetes-validations: + - message: state is immutable once set + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: state may not be added or removed once + set + rule: (has(self.state) == has(oldSelf.state)) || + (!has(oldSelf.state) && self.state != "External") + type: object + x-kubernetes-validations: + - message: cloudControllerManager may not be added or + removed once set + rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) + gcp: + description: GCP contains settings specific to the Google + Cloud Platform infrastructure provider. + properties: + cloudLoadBalancerConfig: + default: + dnsType: PlatformDefault + description: cloudLoadBalancerConfig is a union that + contains the IP addresses of API, API-Int and Ingress + Load Balancers created on the cloud platform.
These + values would not be populated on on-prem platforms. + These Load Balancer IPs are used to configure the + in-cluster DNS instances for API, API-Int and Ingress + services. `dnsType` is expected to be set to `ClusterHosted` + when these Load Balancer IP addresses are populated + and used. + nullable: true + properties: + clusterHosted: + description: clusterHosted holds the IP addresses + of API, API-Int and Ingress Load Balancers on + Cloud Platforms. The DNS solution hosted within + the cluster uses these IP addresses to provide + resolution for API, API-Int and Ingress services. + properties: + apiIntLoadBalancerIPs: + description: apiIntLoadBalancerIPs holds Load + Balancer IPs for the internal API service. + These Load Balancer IP addresses can be + IPv4 and/or IPv6 addresses. Entries in the + apiIntLoadBalancerIPs must be unique. A + maximum of 16 IP addresses are permitted. + format: ip + items: + description: IP is an IP address (for example, + "10.0.0.0" or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + apiLoadBalancerIPs: + description: apiLoadBalancerIPs holds Load + Balancer IPs for the API service. These + Load Balancer IP addresses can be IPv4 and/or + IPv6 addresses. Could be empty for private + clusters. Entries in the apiLoadBalancerIPs + must be unique. A maximum of 16 IP addresses + are permitted. + format: ip + items: + description: IP is an IP address (for example, + "10.0.0.0" or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + ingressLoadBalancerIPs: + description: ingressLoadBalancerIPs holds + IPs for Ingress Load Balancers. These Load + Balancer IP addresses can be IPv4 and/or + IPv6 addresses. Entries in the ingressLoadBalancerIPs + must be unique. A maximum of 16 IP addresses + are permitted. + format: ip + items: + description: IP is an IP address (for example, + "10.0.0.0" or "fd00::"). + maxLength: 39 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid IP address + rule: isIP(self) + maxItems: 16 + type: array + x-kubernetes-list-type: set + type: object + dnsType: + default: PlatformDefault + description: dnsType indicates the type of DNS + solution in use within the cluster. Its default + value of `PlatformDefault` indicates that the + cluster's DNS is the default provided by the + cloud platform. It can be set to `ClusterHosted` + to bypass the configuration of the cloud default + DNS. In this mode, the cluster needs to provide + a self-hosted DNS solution for the cluster's + installation to succeed. The cluster's use of + the cloud's Load Balancers is unaffected by + this setting. The value is immutable after it + has been set at install time. Currently, there + is no way for the customer to add additional + DNS entries into the cluster hosted DNS. Enabling + this functionality allows the user to start + their own DNS solution outside the cluster after + installation is complete. The customer would + be responsible for configuring this custom DNS + solution, and it can be run in addition to the + in-cluster DNS solution.
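A sketch of a cluster-hosted DNS configuration under the schema above; per the validation rule that follows, `clusterHosted` may only be set when `dnsType` is `ClusterHosted`. The addresses are placeholders:

```yaml
# Hypothetical GCP cloudLoadBalancerConfig with in-cluster DNS enabled.
cloudLoadBalancerConfig:
  dnsType: ClusterHosted
  clusterHosted:
    apiLoadBalancerIPs:
      - 203.0.113.10
    apiIntLoadBalancerIPs:
      - 192.0.2.10
    ingressLoadBalancerIPs:
      - 203.0.113.20
```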
+ enum: + - ClusterHosted + - PlatformDefault + type: string + x-kubernetes-validations: + - message: dnsType is immutable + rule: oldSelf == '' || self == oldSelf + type: object + x-kubernetes-validations: + - message: clusterHosted is permitted only when dnsType + is ClusterHosted + rule: 'has(self.dnsType) && self.dnsType != ''ClusterHosted'' + ? !has(self.clusterHosted) : true' + projectID: + description: projectID is the Project ID for + new GCP resources created for the cluster. + type: string + region: + description: region holds the region for new GCP resources + created for the cluster. + type: string + resourceLabels: + description: resourceLabels is a list of additional + labels to apply to GCP resources created for the + cluster. See https://cloud.google.com/compute/docs/labeling-resources + for information on labeling GCP resources. GCP supports + a maximum of 64 labels per resource. OpenShift reserves + 32 labels for internal use, allowing 32 labels for + user configuration. + items: + description: GCPResourceLabel is a label to apply + to GCP resources created for the cluster. + properties: + key: + description: key is the key part of the label. + A label key can have a maximum of 63 characters + and cannot be empty. Label key must begin + with a lowercase letter, and must contain + only lowercase letters, numeric characters, + and the following special characters `_-`. + Label key must not have the reserved prefixes + `kubernetes-io` and `openshift-io`. + maxLength: 63 + minLength: 1 + pattern: ^[a-z][0-9a-z_-]{0,62}$ + type: string + x-kubernetes-validations: + - message: label keys must not start with either + `openshift-io` or `kubernetes-io` + rule: '!self.startsWith(''openshift-io'') + && !self.startsWith(''kubernetes-io'')' + value: + description: value is the value part of the + label. A label value can have a maximum of + 63 characters and cannot be empty. Value must + contain only lowercase letters, numeric characters, + and the following special characters `_-`. + maxLength: 63 + minLength: 1 + pattern: ^[0-9a-z_-]{1,63}$ + type: string + required: + - key + - value + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceLabels are immutable and may only + be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, + x in self) + resourceTags: + description: resourceTags is a list of additional + tags to apply to GCP resources created for the cluster. + See https://cloud.google.com/resource-manager/docs/tags/tags-overview + for information on tagging GCP resources. GCP supports + a maximum of 50 tags per resource. + items: + description: GCPResourceTag is a tag to apply to + GCP resources created for the cluster. + properties: + key: + description: key is the key part of the tag. + A tag key can have a maximum of 63 characters + and cannot be empty. Tag key must begin and + end with an alphanumeric character, and must + contain only uppercase, lowercase alphanumeric + characters, and the following special characters + `._-`. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$ + type: string + parentID: + description: 'parentID is the ID of the hierarchical + resource where the tags are defined, e.g. + at the Organization or the Project level.
+ To find the Organization or Project ID refer + to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, + https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. + An OrganizationID must consist of decimal + numbers, and cannot have leading zeroes. A + ProjectID must be 6 to 30 characters in length, + can only contain lowercase letters, numbers, + and hyphens, and must start with a letter, + and cannot end with a hyphen.' + maxLength: 32 + minLength: 1 + pattern: (^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$) + type: string + value: + description: value is the value part of the + tag. A tag value can have a maximum of 63 + characters and cannot be empty. Tag value + must begin and end with an alphanumeric character, + and must contain only uppercase, lowercase + alphanumeric characters, and the following + special characters `_-.@%=+:,*#&(){}[]` and + spaces. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$ + type: string + required: + - key + - parentID + - value + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceTags are immutable and may only + be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, + x in self) + type: object + x-kubernetes-validations: + - message: resourceLabels may only be configured during + installation + rule: '!has(oldSelf.resourceLabels) && !has(self.resourceLabels) + || has(oldSelf.resourceLabels) && has(self.resourceLabels)' + - message: resourceTags may only be configured during + installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) + || has(oldSelf.resourceTags) && has(self.resourceTags)' + ibmcloud: + description: IBMCloud contains settings specific to the + IBMCloud infrastructure provider. + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud + Internet Services instance managing the DNS zone + for the cluster's base domain + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS + Services instance managing the DNS zone for the + cluster's base domain + type: string + location: + description: Location is where the cluster has been + deployed + type: string + providerType: + description: ProviderType indicates the type of cluster + that was created + type: string + resourceGroupName: + description: ResourceGroupName is the Resource Group + for new IBMCloud resources created for the cluster. + type: string + serviceEndpoints: + description: serviceEndpoints is a list of custom + endpoints which will override the default service + endpoints of an IBM Cloud service. These endpoints + are consumed by components within the cluster to + reach the respective IBM Cloud Services. + items: + description: IBMCloudServiceEndpoint stores the + configuration of a custom url to override existing + defaults of IBM Cloud Services. + properties: + name: + description: 'name is the name of the IBM Cloud + service. Possible values are: CIS, COS, DNSServices, + GlobalSearch, GlobalTagging, HyperProtect, + IAM, KeyProtect, ResourceController, ResourceManager, + or VPC. 
For example, the IBM Cloud Private + IAM service could be configured with the service + `name` of `IAM` and `url` of `https://private.iam.cloud.ibm.com` + Whereas the IBM Cloud Private VPC service + for US South (Dallas) could be configured + with the service `name` of `VPC` and `url` + of `https://us.south.private.iaas.cloud.ibm.com`' + enum: + - CIS + - COS + - DNSServices + - GlobalSearch + - GlobalTagging + - HyperProtect + - IAM + - KeyProtect + - ResourceController + - ResourceManager + - VPC + type: string + url: + description: url is fully qualified URI with + scheme https, that overrides the default generated + endpoint for a client. This must be provided + and cannot be empty. + type: string + x-kubernetes-validations: + - message: url must be a valid absolute URL + rule: isURL(self) + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + kubevirt: + description: Kubevirt contains settings specific to the + kubevirt infrastructure provider. + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. + type: string + type: object + nutanix: + description: Nutanix contains settings specific to the + Nutanix infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. \n Deprecated: Use + APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. \n Deprecated: Use IngressIPs + instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IPs otherwise only + one. 
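Echoing the private-endpoint examples given in the IBM Cloud description above, a serviceEndpoints list might be sketched as:

```yaml
# The two URLs repeat the examples from the field description; any other
# value would equally need to be a valid https URL.
ibmcloud:
  serviceEndpoints:
    - name: IAM
      url: https://private.iam.cloud.ibm.com
    - name: VPC
      url: https://us.south.private.iaas.cloud.ibm.com
```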
+ format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer + used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer + used by the cluster on Nutanix platform which + can be a user-managed or openshift-managed load + balancer that is to be used for the OpenShift + API and Ingress endpoints. When set to OpenShiftManagedDefault + the static pods in charge of API and Ingress + traffic load-balancing defined in the machine + config operator will be deployed. When set to + UserManaged these static pods will not be deployed + and it is expected that the load balancer is + configured out of band by the deployer. When + omitted, this means no opinion and the platform + is left to choose a reasonable default. The + default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + type: object + openstack: + description: OpenStack contains settings specific to the + OpenStack infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. \n Deprecated: Use + APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + cloudName: + description: cloudName is the name of the desired + OpenStack cloud in the client configuration file + (`clouds.yaml`). + type: string + ingressIP: + description: "ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. \n Deprecated: Use IngressIPs + instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IPs otherwise only + one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer + used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer + used by the cluster on OpenStack platform which + can be a user-managed or openshift-managed load + balancer that is to be used for the OpenShift + API and Ingress endpoints. 
When set to OpenShiftManagedDefault + the static pods in charge of API and Ingress + traffic load-balancing defined in the machine + config operator will be deployed. When set to + UserManaged these static pods will not be deployed + and it is expected that the load balancer is + configured out of band by the deployer. When + omitted, this means no opinion and the platform + is left to choose a reasonable default. The + default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. + items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. Unlike the one managed by + the DNS operator, `NodeDNSIP` provides name resolution + for the nodes themselves. There is no DNS-as-a-service + for OpenStack deployments. In order to minimize + necessary changes to the datacenter DNS, a DNS service + is hosted as a static pod to serve those hostnames + to the nodes in the cluster. + type: string + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt + infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. \n Deprecated: Use + APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. \n Deprecated: Use IngressIPs + instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IPs otherwise only + one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer + used by the cluster is configured. 
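+ # For illustration (sketch): a load balancer configured out of band by the
+ # deployer would be expressed with the UserManaged variant of the enum below:
+ #   loadBalancer:
+ #     type: UserManaged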
+ properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer + used by the cluster on Ovirt platform which + can be a user-managed or openshift-managed load + balancer that is to be used for the OpenShift + API and Ingress endpoints. When set to OpenShiftManagedDefault + the static pods in charge of API and Ingress + traffic load-balancing defined in the machine + config operator will be deployed. When set to + UserManaged these static pods will not be deployed + and it is expected that the load balancer is + configured out of band by the deployer. When + omitted, this means no opinion and the platform + is left to choose a reasonable default. The + default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + nodeDNSIP: + description: 'deprecated: as of 4.6, this field is + no longer set or honored. It will be removed in + a future release.' + type: string + type: object + powervs: + description: PowerVS contains settings specific to the + Power Systems Virtual Servers infrastructure provider. + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud + Internet Services instance managing the DNS zone + for the cluster's base domain + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS + Services instance managing the DNS zone for the + cluster's base domain + type: string + region: + description: region holds the default Power VS region + for new Power VS resources created by the cluster. + type: string + resourceGroup: + description: 'resourceGroup is the resource group + name for new IBMCloud resources created for a cluster. + The resource group specified here will be used by + cluster-image-registry-operator to set up a COS + Instance in IBMCloud for the cluster registry. More + about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. + When omitted, the image registry operator won''t + be able to configure storage, which results in the + image registry cluster operator not being in an + available state.' + maxLength: 40 + pattern: ^[a-zA-Z0-9-_ ]+$ + type: string + x-kubernetes-validations: + - message: resourceGroup is immutable once set + rule: oldSelf == '' || self == oldSelf + serviceEndpoints: + description: serviceEndpoints is a list of custom + endpoints which will override the default service + endpoints of a Power VS service. + items: + description: PowervsServiceEndpoint stores the configuration + of a custom url to override existing defaults + of PowerVS Services. + properties: + name: + description: name is the name of the Power VS + service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api + ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller + Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with + scheme https, that overrides the default generated + endpoint for a client. This must be provided + and cannot be empty. 
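+ # Hypothetical example of a Power VS endpoint override satisfying the name
+ # pattern above and the https-only url constraint below:
+ #   serviceEndpoints:
+ #   - name: iam
+ #     url: https://private.iam.cloud.ibm.com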
+ format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + zone: + description: 'zone holds the default zone for the + new Power VS resources created by the cluster. Note: + Currently only single-zone OCP clusters are supported' + type: string + type: object + x-kubernetes-validations: + - message: cannot unset resourceGroup once set + rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' + type: + description: "type is the underlying infrastructure provider + for the cluster. This value controls whether infrastructure + automation such as service load balancers, dynamic volume + provisioning, machine creation and deletion, and other + integrations are enabled. If None, no infrastructure + automation is enabled. Allowed values are \"AWS\", \"Azure\", + \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", + \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", + \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual + components may not support all platforms, and must handle + unrecognized platforms as None if they do not support + that platform. \n This value will be synced to + the `status.platform` and `status.platformStatus.type`. + Currently this value cannot be changed once set." + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + vsphere: + description: VSphere contains settings specific to the + VSphere infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer + in front of the API servers. \n Deprecated: Use + APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses + to contact the Kubernetes API server that can be + used by components inside the cluster, like kubelets + using the infrastructure rather than Kubernetes + networking. These are the IPs for a self-hosted + load balancer in front of the API servers. In dual + stack clusters this list contains two IPs otherwise + only one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + ingressIP: + description: "ingressIP is an external IP which routes + to the default ingress controller. The IP is a suitable + target of a wildcard DNS record used to resolve + default route host names. \n Deprecated: Use IngressIPs + instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which + route to the default ingress controller. The IPs + are suitable targets of a wildcard DNS record used + to resolve default route host names. In dual stack + clusters this list contains two IPs otherwise only + one. + format: ip + items: + type: string + maxItems: 2 + type: array + x-kubernetes-list-type: set + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer + used by the cluster is configured.
+ properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer + used by the cluster on VSphere platform which + can be a user-managed or openshift-managed load + balancer that is to be used for the OpenShift + API and Ingress endpoints. When set to OpenShiftManagedDefault + the static pods in charge of API and Ingress + traffic load-balancing defined in the machine + config operator will be deployed. When set to + UserManaged these static pods will not be deployed + and it is expected that the load balancer is + configured out of band by the deployer. When + omitted, this means no opinion and the platform + is left to choose a reasonable default. The + default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + machineNetworks: + description: machineNetworks are IP networks used + to connect all the OpenShift cluster nodes. + items: + description: CIDR is an IP address range in CIDR + notation (for example, "10.0.0.0/8" or "fd00::/8"). + maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 32 + type: array + x-kubernetes-list-type: set + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal + DNS used by the nodes. Unlike the one managed by + the DNS operator, `NodeDNSIP` provides name resolution + for the nodes themselves. There is no DNS-as-a-service + for vSphere deployments. In order to minimize necessary + changes to the datacenter DNS, a DNS service is + hosted as a static pod to serve those hostnames + to the nodes in the cluster. + type: string + type: object + type: object + type: object + required: + - spec + type: object + x-kubernetes-embedded-resource: true + internalRegistryPullSecret: + description: internalRegistryPullSecret is the pull secret for the + internal registry, used by rpm-ostree to pull images from the internal + registry if present + format: byte + nullable: true + type: string + ipFamilies: + description: ipFamilies indicates the IP families in use by the cluster + network + type: string + kubeAPIServerServingCAData: + description: kubeAPIServerServingCAData managed Kubelet to API Server + Cert... Rotated automatically + format: byte + type: string + network: + description: Network contains additional network related information + nullable: true + properties: + mtuMigration: + description: MTUMigration contains the MTU migration configuration. + nullable: true + properties: + machine: + description: Machine contains MTU migration configuration + for the machine's uplink. + properties: + from: + description: From is the MTU to migrate from. + format: int32 + minimum: 0 + type: integer + to: + description: To is the MTU to migrate to. + format: int32 + minimum: 0 + type: integer + type: object + network: + description: Network contains MTU migration configuration + for the default network. + properties: + from: + description: From is the MTU to migrate from. + format: int32 + minimum: 0 + type: integer + to: + description: To is the MTU to migrate to. 
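+ # A minimal sketch of the enclosing mtuMigration block with assumed MTU
+ # values; machine covers the node uplink, network the default network:
+ #   mtuMigration:
+ #     machine:
+ #       from: 1500
+ #       to: 9000
+ #     network:
+ #       from: 1400
+ #       to: 8900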
+ format: int32 + minimum: 0 + type: integer + type: object + type: object + required: + - mtuMigration + type: object + networkType: + description: 'networkType holds the type of network the cluster is + using XXX: this is temporary and will be dropped as soon as possible + in favor of a better support to start network related services the + proper way. Nobody is also changing this once the cluster is up + and running the first time, so, disallow regeneration if this changes.' + type: string + osImageURL: + description: OSImageURL is the old-format container image that contains + the OS update payload. + type: string + platform: + description: platform is deprecated, use Infra.Status.PlatformStatus.Type + instead + type: string + proxy: + description: proxy holds the current proxy configuration for the nodes + nullable: true + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or + CIDRs for which the proxy should not be used. + type: string + type: object + pullSecret: + description: pullSecret is the default pull secret that needs to be + installed on all machines. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + releaseImage: + description: releaseImage is the image used when installing the cluster + type: string + rootCAData: + description: rootCAData specifies the root CA data + format: byte + type: string + required: + - additionalTrustBundle + - baseOSContainerImage + - cloudProviderCAData + - cloudProviderConfig + - clusterDNSIP + - dns + - images + - infra + - ipFamilies + - kubeAPIServerServingCAData + - network + - proxy + - releaseImage + - rootCAData + type: object + status: + description: ControllerConfigStatus is the status for ControllerConfig + properties: + conditions: + description: conditions represents the latest available observations + of current state. + items: + description: ControllerConfigStatusCondition contains condition + information for ControllerConfigStatus + properties: + lastTransitionTime: + description: lastTransitionTime is the time of the last update + to the current status object. + format: date-time + nullable: true + type: string + message: + description: message provides additional information about the + current condition. This is only to be consumed by humans. + type: string + reason: + description: reason is the reason for the condition's last transition. Reasons + are PascalCase + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type specifies the state of the operator's reconciliation + functionality. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + x-kubernetes-list-type: atomic + controllerCertificates: + description: controllerCertificates represents the latest available + observations of the automatically rotating certificates in the MCO. + items: + description: ControllerCertificate contains info about a specific + cert. + properties: + bundleFile: + description: bundleFile is the larger bundle a cert comes from + type: string + notAfter: + description: notAfter is the upper boundary for validity + format: date-time + type: string + notBefore: + description: notBefore is the lower boundary for validity + format: date-time + type: string + signer: + description: signer is the cert Issuer + type: string + subject: + description: subject is the cert subject + type: string + required: + - bundleFile + - signer + - subject + type: object + type: array + x-kubernetes-list-type: atomic + observedGeneration: + description: observedGeneration represents the generation observed + by the controller. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..c780364573 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigpools-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,629 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineconfigpools.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineConfigPool + listKind: MachineConfigPoolList + plural: machineconfigpools + shortNames: + - mcp + singular: machineconfigpool + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.configuration.name + name: Config + type: string + - description: When all the machines in the pool are updated to the correct machine + config. + jsonPath: .status.conditions[?(@.type=="Updated")].status + name: Updated + type: string + - description: When at least one machine is either not updated or is in + the process of updating to the desired machine config. + jsonPath: .status.conditions[?(@.type=="Updating")].status + name: Updating + type: string + - description: When progress is blocked on updating one or more nodes or the pool + configuration is failing. + jsonPath: .status.conditions[?(@.type=="Degraded")].status + name: Degraded + type: string + - description: Total number of machines in the machine config pool + jsonPath: .status.machineCount + name: MachineCount + type: number + - description: Total number of ready machines targeted by the pool + jsonPath: .status.readyMachineCount + name: ReadyMachineCount + type: number + - description: Total number of machines targeted by the pool that have the CurrentMachineConfig + as their config + jsonPath: .status.updatedMachineCount + name: UpdatedMachineCount + type: number + - description: Total number of machines marked degraded (or unreconcilable) + jsonPath: .status.degradedMachineCount + name: DegradedMachineCount + type: number + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: "MachineConfigPool describes a pool of MachineConfigs. \n Compatibility + level 1: Stable within a major release for a minimum of 12 months or 3 minor + releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MachineConfigPoolSpec is the spec for MachineConfigPool resource. + properties: + configuration: + description: The targeted MachineConfig object for the machine config + pool. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + source: + description: source is the list of MachineConfig objects that + were used to generate the single MachineConfig object specified + in `content`. + items: + description: "ObjectReference contains enough information to + let you inspect or modify the referred object. --- New uses + of this type are discouraged because of difficulty describing + its usage when embedded in APIs. 1. Ignored fields. It includes + many fields which are not generally honored. For instance, + ResourceVersion and FieldPath are both very rarely valid in + actual usage. 2. Invalid usage help. It is impossible to + add specific help for individual usage. In most embedded + usages, there are particular restrictions like, \"must refer + only to types A and B\" or \"UID not honored\" or \"name must + be restricted\". Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, + the validation rules are different by usage, which makes it + hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency + is on the group,resource tuple and the version of the actual + struct is irrelevant. 5. 
We cannot easily change it. Because + this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an + underspecified API type they do not control. \n Instead of + using this type, create a locally provided and used type that + is well-focused on your reference. For example, ServiceReferences + for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + machineConfigSelector: + description: machineConfigSelector specifies a label selector for + MachineConfigs. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + on how label and selectors work. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. 
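+ # e.g. (sketch): the selector above is conventionally satisfied with a single
+ # matchLabels entry keyed on the MCO role label; exact labels are
+ # cluster-specific:
+ #   machineConfigSelector:
+ #     matchLabels:
+ #       machineconfiguration.openshift.io/role: worker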
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + maxUnavailable: + anyOf: + - type: integer + - type: string + description: "maxUnavailable defines either an integer number or percentage + of nodes in the pool that can go Unavailable during an update. This + includes nodes Unavailable for any reason, including user initiated + cordons, failing nodes, etc. The default value is 1. \n A value + larger than 1 will mean multiple nodes going unavailable during + the update, which may affect your workload stress on the remaining + nodes. You cannot set this value to 0 to stop updates (it will default + back to 1); to stop updates, use the 'paused' property instead. + Drain will respect Pod Disruption Budgets (PDBs) such as etcd quorum + guards, even if maxUnavailable is greater than one." + x-kubernetes-int-or-string: true + nodeSelector: + description: nodeSelector specifies a label selector for Machines + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + paused: + description: paused specifies whether or not changes to this machine + config pool should be stopped. This includes generating new desiredMachineConfig + and update of machines. + type: boolean + pinnedImageSets: + description: "pinnedImageSets specifies a sequence of PinnedImageSetRef + objects for the pool. Nodes within this pool will preload and pin + images defined in the PinnedImageSet. Before pulling images the + MachineConfigDaemon will ensure the total uncompressed size of all + the images does not exceed available resources. If the total size + of the images exceeds the available resources the controller will + report a Degraded status to the MachineConfigPool and not attempt + to pull any images. 
Also to help ensure the kubelet can mitigate + storage risk, the pinned_image configuration and subsequent service + reload will happen only after all of the images have been pulled + for each set. Images from multiple PinnedImageSets are loaded and + pinned sequentially as listed. Duplicate and existing images will + be skipped. \n Any failure to prefetch or pin images will result + in a Degraded pool. Resolving these failures is the responsibility + of the user. The admin should be proactive in ensuring adequate + storage and proper image authentication exists in advance." + items: + properties: + name: + description: name is a reference to the name of a PinnedImageSet. Must + adhere to RFC-1123 (https://tools.ietf.org/html/rfc1123). + Made up of one or more period-separated (.) segments, where + each segment consists of alphanumeric characters and hyphens + (-), must begin and end with an alphanumeric character, and + is at most 63 characters in length. The total length of the + name must not exceed 253 characters. + maxLength: 253 + minLength: 1 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + status: + description: MachineConfigPoolStatus is the status for MachineConfigPool + resource. + properties: + certExpirys: + description: certExpirys keeps track of important certificate expiration + data + items: + description: certExpiry contains the bundle name and the expiry + date + properties: + bundle: + description: bundle is the name of the bundle in which the subject + certificate resides + type: string + expiry: + description: expiry is the date after which the certificate + will no longer be valid + format: date-time + type: string + subject: + description: subject is the subject of the certificate + type: string + required: + - bundle + - subject + type: object + type: array + x-kubernetes-list-type: atomic + conditions: + description: conditions represents the latest available observations + of current state. + items: + description: MachineConfigPoolCondition contains condition information + for a MachineConfigPool. + properties: + lastTransitionTime: + description: lastTransitionTime is the timestamp corresponding + to the last status change of this condition. + format: date-time + nullable: true + type: string + message: + description: message is a human readable description of the + details of the last transition, complementing reason. + type: string + reason: + description: reason is a brief machine readable explanation + for the condition's last transition. + type: string + status: + description: status of the condition, one of ('True', 'False', + 'Unknown'). + type: string + type: + description: type of the condition, currently ('Done', 'Updating', + 'Failed'). + type: string + type: object + type: array + x-kubernetes-list-type: atomic + configuration: + description: configuration represents the current MachineConfig object + for the machine config pool. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2].
+ For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + source: + description: source is the list of MachineConfig objects that + were used to generate the single MachineConfig object specified + in `content`. + items: + description: "ObjectReference contains enough information to + let you inspect or modify the referred object. --- New uses + of this type are discouraged because of difficulty describing + its usage when embedded in APIs. 1. Ignored fields. It includes + many fields which are not generally honored. For instance, + ResourceVersion and FieldPath are both very rarely valid in + actual usage. 2. Invalid usage help. It is impossible to + add specific help for individual usage. In most embedded + usages, there are particular restrictions like, \"must refer + only to types A and B\" or \"UID not honored\" or \"name must + be restricted\". Those cannot be well described when embedded. + 3. Inconsistent validation. Because the usages are different, + the validation rules are different by usage, which makes it + hard for users to predict what will happen. 4. The fields + are both imprecise and overly precise. Kind is not a precise + mapping to a URL. This can produce ambiguity during interpretation + and require a REST mapping. In most cases, the dependency + is on the group,resource tuple and the version of the actual + struct is irrelevant. 5. We cannot easily change it. Because + this type is embedded in many locations, updates to this type + will affect numerous schemas. Don't make new APIs embed an + underspecified API type they do not control. \n Instead of + using this type, create a locally provided and used type that + is well-focused on your reference. For example, ServiceReferences + for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 + ." + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that + triggered the event) or if no container name is specified + "spec.containers[2]" (container with index 2 in this pod). + This syntax is chosen only to have some well-defined way + of referencing a part of an object. TODO: this design + is not final and this field is subject to change in the + future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + degradedMachineCount: + description: degradedMachineCount represents the total number of machines + marked degraded (or unreconcilable). A node is marked degraded if + applying a configuration failed. + format: int32 + type: integer + machineCount: + description: machineCount represents the total number of machines + in the machine config pool. + format: int32 + type: integer + observedGeneration: + description: observedGeneration represents the generation observed + by the controller. + format: int64 + type: integer + poolSynchronizersStatus: + description: poolSynchronizersStatus is the status of the machines + managed by the pool synchronizers. + items: + properties: + availableMachineCount: + description: availableMachineCount is the number of machines + managed by the node synchronizer which are available. + format: int64 + minimum: 0 + type: integer + machineCount: + description: machineCount is the number of machines that are + managed by the node synchronizer. + format: int64 + minimum: 0 + type: integer + observedGeneration: + description: observedGeneration is the last generation change + that has been applied. + format: int64 + minimum: 0 + type: integer + x-kubernetes-validations: + - message: observedGeneration must not move backwards except + to zero + rule: self >= oldSelf || (self == 0 && oldSelf > 0) + poolSynchronizerType: + description: poolSynchronizerType describes the type of the + pool synchronizer. + enum: + - PinnedImageSets + maxLength: 256 + type: string + readyMachineCount: + description: readyMachineCount is the number of machines managed + by the node synchronizer that are in a ready state. + format: int64 + minimum: 0 + type: integer + unavailableMachineCount: + description: unavailableMachineCount is the number of machines + managed by the node synchronizer that are unavailable.
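+ # Illustrative poolSynchronizersStatus entry (hypothetical counts) chosen to
+ # satisfy the CEL rules declared below (machineCount >= every other count,
+ # availableMachineCount >= readyMachineCount):
+ #   poolSynchronizersStatus:
+ #   - poolSynchronizerType: PinnedImageSets
+ #     machineCount: 3
+ #     updatedMachineCount: 3
+ #     readyMachineCount: 3
+ #     availableMachineCount: 3
+ #     unavailableMachineCount: 0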
+ format: int64 + minimum: 0 + type: integer + updatedMachineCount: + description: updatedMachineCount is the number of machines that + have been updated by the node synchronizer. + format: int64 + minimum: 0 + type: integer + required: + - availableMachineCount + - machineCount + - poolSynchronizerType + - readyMachineCount + - unavailableMachineCount + - updatedMachineCount + type: object + x-kubernetes-validations: + - message: machineCount must be greater than or equal to updatedMachineCount + rule: self.machineCount >= self.updatedMachineCount + - message: machineCount must be greater than or equal to availableMachineCount + rule: self.machineCount >= self.availableMachineCount + - message: machineCount must be greater than or equal to unavailableMachineCount + rule: self.machineCount >= self.unavailableMachineCount + - message: machineCount must be greater than or equal to readyMachineCount + rule: self.machineCount >= self.readyMachineCount + - message: availableMachineCount must be greater than or equal to + readyMachineCount + rule: self.availableMachineCount >= self.readyMachineCount + type: array + x-kubernetes-list-map-keys: + - poolSynchronizerType + x-kubernetes-list-type: map + readyMachineCount: + description: readyMachineCount represents the total number of ready + machines targeted by the pool. + format: int32 + type: integer + unavailableMachineCount: + description: unavailableMachineCount represents the total number of + unavailable (non-ready) machines targeted by the pool. A node is + marked unavailable if it is in updating state or NodeReady condition + is false. + format: int32 + type: integer + updatedMachineCount: + description: updatedMachineCount represents the total number of machines + targeted by the pool that have the CurrentMachineConfig as their + config. 
+ format: int32 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfignodes-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfignodes-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..eed92c7361 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfignodes-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,366 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1596 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineconfignodes.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineConfigNode + listKind: MachineConfigNodeList + plural: machineconfignodes + singular: machineconfignode + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Updated")].status + name: Updated + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePrepared")].status + name: UpdatePrepared + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateExecuted")].status + name: UpdateExecuted + type: string + - jsonPath: .status.conditions[?(@.type=="UpdatePostActionComplete")].status + name: UpdatePostActionComplete + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateComplete")].status + name: UpdateComplete + type: string + - jsonPath: .status.conditions[?(@.type=="Resumed")].status + name: Resumed + type: string + - jsonPath: .status.conditions[?(@.type=="UpdateCompatible")].status + name: UpdateCompatible + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="AppliedFilesAndOS")].status + name: UpdatedFilesAndOS + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Cordoned")].status + name: CordonedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Drained")].status + name: DrainedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="RebootedNode")].status + name: RebootedNode + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="ReloadedCRIO")].status + name: ReloadedCRIO + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Uncordoned")].status + name: UncordonedNode + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: 'MachineConfigNode describes the health of the Machines on the + system Compatibility level 4: No compatibility is provided, the API can + change at any point for any reason. These capabilities should not be used + by applications needing long term support.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machine config node. + properties: + configVersion: + description: configVersion holds the desired config version for the + node targeted by this machine config node resource. The desired + version represents the machine config the node will attempt to update + to. This gets set before the machine config operator validates the + new machine config against the current machine config. + properties: + desired: + description: desired is the name of the machine config that the + node should be upgraded to. This value is set when the machine + config pool generates a new version of its rendered configuration. + When this value is changed, the machine config daemon starts + the node upgrade process. This value gets set in the machine + config node spec once the machine config has been targeted for + upgrade and before it is validated. Must be a lowercase RFC-1123 + hostname (https://tools.ietf.org/html/rfc1123) It may consist + of only alphanumeric characters, hyphens (-) and periods (.) + and must be at most 253 characters in length. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - desired + type: object + node: + description: node contains a reference to the node for this machine + config node. + properties: + name: + description: name is the object name. Must be a lowercase RFC-1123 + hostname (https://tools.ietf.org/html/rfc1123) It may consist + of only alphanumeric characters, hyphens (-) and periods (.) + and must be at most 253 characters in length. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + pinnedImageSets: + description: pinnedImageSets holds the desired pinned image sets that + this node should pin and pull. + items: + properties: + name: + description: name is the name of the pinned image set. Must + be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + It may consist of only alphanumeric characters, hyphens (-) + and periods (.) and must be at most 253 characters in length. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + pool: + description: pool contains a reference to the machine config pool + that this machine config node's referenced node belongs to. + properties: + name: + description: name is the object name. Must be a lowercase RFC-1123 + hostname (https://tools.ietf.org/html/rfc1123) It may consist + of only alphanumeric characters, hyphens (-) and periods (.) + and must be at most 253 characters in length.
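+ # A minimal sketch of a conforming MachineConfigNode spec (hypothetical node,
+ # pool, and rendered-config names; metadata.name must equal spec.node.name
+ # per the CRD-level validation):
+ #   spec:
+ #     node:
+ #       name: worker-0
+ #     pool:
+ #       name: worker
+ #     configVersion:
+ #       desired: rendered-worker-5f4a3b2c1d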
+ maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + required: + - configVersion + - node + - pool + type: object + status: + description: status describes the last observed state of this machine + config node. + properties: + conditions: + description: conditions represent the observations of a machine config + node's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + configVersion: + description: configVersion describes the current and desired machine + config for this node. The current version represents the current + machine config for the node and is updated after a successful update. 
+ The desired version represents the machine config the node will + attempt to update to. This desired machine config has been compared + to the current machine config and has been validated by the machine + config operator as one that is valid and that exists. + properties: + current: + description: current is the name of the machine config currently + in use on the node. This value is updated once the machine config + daemon has completed the update of the configuration for the + node. This value should match the desired version unless an + upgrade is in progress. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) It may consist of only + alphanumeric characters, hyphens (-) and periods (.) and must + be at most 253 characters in length. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + desired: + description: desired is the MachineConfig the node wants to upgrade + to. This value gets set in the machine config node status once + the machine config has been validated against the current machine + config. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + It may consist of only alphanumeric characters, hyphens (-) + and periods (.) and must be at most 253 characters in length. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - desired + type: object + observedGeneration: + description: observedGeneration represents the generation observed + by the controller. This field is updated when the controller observes + a change to the desiredConfig in the configVersion of the machine + config node spec. + format: int64 + type: integer + pinnedImageSets: + description: pinnedImageSets describes the current and desired pinned + image sets for this node. The current version is the generation + of the pinned image set that has most recently been successfully + pulled and pinned on this node. The desired version is the generation + of the pinned image set that is targeted to be pulled and pinned + on this node. + items: + properties: + currentGeneration: + description: currentGeneration is the generation of the pinned + image set that has most recently been successfully pulled + and pinned on this node. + format: int32 + type: integer + desiredGeneration: + description: desiredGeneration is the generation of + the pinned image set that is targeted to be pulled and pinned + on this node. + format: int32 + minimum: 0 + type: integer + lastFailedGeneration: + description: lastFailedGeneration is the generation of the most + recent pinned image set that failed to be pulled and pinned + on this node. + format: int32 + minimum: 0 + type: integer + lastFailedGenerationErrors: + description: lastFailedGenerationErrors is a list of errors + explaining why the lastFailed generation failed to be pulled + and pinned. + items: + type: string + maxItems: 10 + type: array + name: + description: name is the name of the pinned image set. Must + be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + It may consist of only alphanumeric characters, hyphens (-) + and periods (.) and must be at most 253 characters in length.
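+ # Sketch of one status pinnedImageSets entry (hypothetical name and
+ # generations) consistent with the ordering rules declared below
+ # (desiredGeneration >= currentGeneration):
+ #   pinnedImageSets:
+ #   - name: my-pinned-images
+ #     currentGeneration: 2
+ #     desiredGeneration: 2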
+ maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + x-kubernetes-validations: + - message: desired generation must be greater than or equal to the + current generation + rule: 'has(self.desiredGeneration) && has(self.currentGeneration) + ? self.desiredGeneration >= self.currentGeneration : true' + - message: desired generation must be greater than last failed generation + rule: 'has(self.lastFailedGeneration) && has(self.desiredGeneration) + ? self.desiredGeneration >= self.lastFailedGeneration : true' + - message: desired generation must be defined if last failed generation + is defined + rule: 'has(self.lastFailedGeneration) ? has(self.desiredGeneration): + true' + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - configVersion + type: object + required: + - spec + type: object + x-kubernetes-validations: + - message: spec.node.name should match metadata.name + rule: self.metadata.name == self.spec.node.name + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..f274fba11a --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosbuilds-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,300 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1773 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineosbuilds.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineOSBuild + listKind: MachineOSBuildList + plural: machineosbuilds + singular: machineosbuild + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Prepared")].status + name: Prepared + type: string + - jsonPath: .status.conditions[?(@.type=="Building")].status + name: Building + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Interrupted")].status + name: Interrupted + type: string + - jsonPath: .status.conditions[?(@.type=="Restarted")].status + name: Restarted + type: string + - jsonPath: .status.conditions[?(@.type=="Failed")].status + name: Failed + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: 'MachineOSBuild describes a build process managed and deployed + by the MCO Compatibility level 4: No compatibility is provided, the API + can change at any point for any reason. These capabilities should not be + used by applications needing long term support.' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machine os build + properties: + configGeneration: + description: configGeneration tracks which version of MachineOSConfig + this build is based off of + format: int64 + minimum: 1 + type: integer + desiredConfig: + description: desiredConfig is the desired config we want to build + an image for. + properties: + name: + description: name is the name of the rendered MachineConfig object. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + machineOSConfig: + description: machineOSConfig is the config object which the build + is based off of + properties: + name: + description: name of the MachineOSConfig + type: string + required: + - name + type: object + renderedImagePushspec: + description: 'renderedImagePushspec is set from the MachineOSConfig. + The format of the image pullspec is: host[:port][/namespace]/name:<tag> + or svc_name.namespace.svc[:port]/repository/name:<tag>' + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid ':<tag>', where + '<digest>' is 64 characters long and '<tag>' is any valid string. Or + it must be a valid .svc followed by a port, repository, image + name, and tag. + rule: ((self.split(':').size() == 2 && self.split(':')[1].matches('^([a-zA-Z0-9-./:])+$')) + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. Or it must + be a valid .svc followed by a port, repository, image name, and + tag. + rule: ((self.split(':').size() == 2 && self.split(':')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$')) + version: + description: version tracks the newest MachineOSBuild for each MachineOSConfig + format: int64 + minimum: 1 + type: integer + required: + - configGeneration + - desiredConfig + - machineOSConfig + - renderedImagePushspec + - version + type: object + x-kubernetes-validations: + - message: machineOSBuildSpec is immutable once set + rule: self == oldSelf + status: + description: status describes the last observed state of this machine os + build + properties: + buildEnd: + description: buildEnd describes when the build ended. + format: date-time + type: string + x-kubernetes-validations: + - message: buildEnd is immutable once set + rule: self == oldSelf + buildStart: + description: buildStart describes when the build started.
+ format: date-time + type: string + x-kubernetes-validations: + - message: buildStart is immutable once set + rule: self == oldSelf + builderReference: + description: builderReference describes the image builder used for + this build, as set in the MachineOSConfig + properties: + buildPod: + description: buildPod is a reference to the pod that performs + the build. Only valid when imageBuilderType is PodImageBuilder. + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + required: + - group + - name + - resource + type: object + imageBuilderType: + description: ImageBuilderType describes the image builder set + in the MachineOSConfig + type: string + required: + - imageBuilderType + type: object + x-kubernetes-validations: + - message: buildPod is required when imageBuilderType is PodImageBuilder, + and forbidden otherwise + rule: 'has(self.imageBuilderType) && self.imageBuilderType == ''PodImageBuilder'' + ? true : !has(self.buildPod)' + conditions: + description: 'conditions are state related conditions for the build. + Valid types are: Prepared, Building, Failed, Interrupted, and Succeeded. + Once a Build is marked as Failed, no future conditions can be set. + This is enforced by the MCO.' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown.
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + finalImagePullspec: + description: finalImagePullspec describes the fully qualified pullspec + produced by this build. Must be in sha + format. + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:<digest>' + suffix, where '<digest>' is 64 characters long + rule: ((self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$'))) + relatedObjects: + description: relatedObjects is a list of objects that are related + to the build process. + items: + description: ObjectReference contains enough information to let + you inspect or modify the referred object. + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + required: + - group + - name + - resource + type: object + type: array + required: + - buildStart + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..cfd959f7f3 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_machineosconfigs-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,352 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1773 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: machineosconfigs.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: MachineOSConfig + listKind: MachineOSConfigList + plural: machineosconfigs + singular: machineosconfig + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: 'MachineOSConfig describes the configuration for a build process + managed by the MCO. Compatibility level 4: No compatibility is provided, + the API can change at any point for any reason. These capabilities should + not be used by applications needing long term support.'
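+      # Illustrative sketch (not part of the generated schema): a minimal
+      # MachineOSConfig carrying only the required fields might look like the
+      # following; the pool name, secret names, and pushspec are hypothetical.
+      #
+      #   apiVersion: machineconfiguration.openshift.io/v1alpha1
+      #   kind: MachineOSConfig
+      #   metadata:
+      #     name: layered-worker
+      #   spec:
+      #     machineConfigPool:
+      #       name: worker
+      #     buildInputs:
+      #       imageBuilder:
+      #         imageBuilderType: PodImageBuilder
+      #       baseImagePullSecret:
+      #         name: base-image-pull-secret
+      #       renderedImagePushSecret:
+      #         name: final-image-push-secret
+      #       renderedImagePushspec: registry.example.com/mco/os-image:latest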
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the configuration of the machineosconfig + properties: + buildInputs: + description: buildInputs is where user input options for the build + live + properties: + baseImagePullSecret: + description: baseImagePullSecret is the secret used to pull the + base image. must live in the openshift-machine-config-operator + namespace + properties: + name: + description: name is the name of the secret used to push or + pull this MachineOSConfig object. this secret must be in + the openshift-machine-config-operator namespace. + type: string + required: + - name + type: object + baseOSExtensionsImagePullspec: + description: 'baseOSExtensionsImagePullspec is the base Extensions + image used in the build process. the MachineOSConfig object will + use the in cluster image registry configuration. if you wish + to use a mirror or any other settings specific to registries.conf, + please specify those in the cluster wide registries.conf. The + format of the image pullspec is: host[:port][/namespace]/name@sha256:<digest>' + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:<digest>' + suffix, where '<digest>' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + baseOSImagePullspec: + description: 'baseOSImagePullspec is the base OSImage we use to + build our custom image. the MachineOSConfig object will use + the in cluster image registry configuration. if you wish to + use a mirror or any other settings specific to registries.conf, + please specify those in the cluster wide registries.conf. The + format of the image pullspec is: host[:port][/namespace]/name@sha256:<digest>' + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:<digest>' + suffix, where '<digest>' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + containerFile: + description: containerFile describes the custom data the user + has specified to build into the image. this is also commonly + called a Dockerfile and you can treat it as such. The content + is the content of your Dockerfile.
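+          # Illustrative sketch: a containerFile entry supplies Dockerfile-style
+          # content for the build; the package installed here is hypothetical.
+          #
+          #   containerFile:
+          #   - containerfileArch: noarch
+          #     content: |-
+          #       RUN dnf install -y tree && dnf clean all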
+ items: + description: MachineOSContainerfile contains all custom content + the user wants built into the image + properties: + containerfileArch: + default: noarch + description: 'containerfileArch describes the architecture + this containerfile is to be built for. this arch is optional. + If the user does not specify an architecture, it is assumed + that the content can be applied to all architectures, + or in a single arch cluster: the only architecture.' + enum: + - arm64 + - amd64 + - ppc64le + - s390x + - aarch64 + - x86_64 + - noarch + type: string + content: + description: content is the custom content to be built + type: string + required: + - content + type: object + maxItems: 7 + minItems: 0 + type: array + x-kubernetes-list-map-keys: + - containerfileArch + x-kubernetes-list-type: map + imageBuilder: + description: machineOSImageBuilder describes which image builder + will be used in each build triggered by this MachineOSConfig + properties: + imageBuilderType: + default: PodImageBuilder + description: 'imageBuilderType specifies the backend to be + used to build the image. Valid options are: PodImageBuilder' + enum: + - PodImageBuilder + type: string + required: + - imageBuilderType + type: object + releaseVersion: + description: 'releaseVersion is associated with the base OS Image. + This is the version of OpenShift that the Base Image is associated + with. This field is populated from the machine-config-osimageurl + configmap in the openshift-machine-config-operator namespace. + It will come in the format: 4.16.0-0.nightly-2024-04-03-065948 + or any valid release. The MachineOSBuilder populates this field + and validates that this is a valid stream. This is used as a + label in the dockerfile that builds the OS image.' + type: string + renderedImagePushSecret: + description: renderedImagePushSecret is the secret used to connect + to a user registry. the final image push and pull secrets should + be separate for security concerns. If the final image push secret + is somehow exfiltrated, that gives someone the power to push + images to the image repository. By comparison, if the final + image pull secret gets exfiltrated, that only gives someone the power + to pull images from the image repository. It's basically the + principle of least permissions. this push secret will be used + only by the MachineConfigController pod to push the image to + the final destination. Not all nodes will need to push this + image, most of them will only need to pull the image in order + to use it. + properties: + name: + description: name is the name of the secret used to push or + pull this MachineOSConfig object. this secret must be in + the openshift-machine-config-operator namespace. + type: string + required: + - name + type: object + renderedImagePushspec: + description: 'renderedImagePushspec describes the location of + the final image. the MachineOSConfig object will use the in + cluster image registry configuration. if you wish to use a mirror + or any other settings specific to registries.conf, please specify + those in the cluster wide registries.conf. The format of the + image pushspec is: host[:port][/namespace]/name:<tag> or svc_name.namespace.svc[:port]/repository/name:<tag>' + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid ':<tag>', + where '<digest>' is 64 characters long and '<tag>' is any + valid string. Or it must be a valid .svc followed by a port, + repository, image name, and tag.
+ rule: ((self.split(':').size() == 2 && self.split(':')[1].matches('^([a-zA-Z0-9-./:])+$')) + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme. Or it must + be a valid .svc followed by a port, repository, image name, + and tag. + rule: ((self.split(':').size() == 2 && self.split(':')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + || self.matches('^[^.]+\\.[^.]+\\.svc:\\d+\\/[^\\/]+\\/[^\\/]+:[^\\/]+$')) + required: + - baseImagePullSecret + - imageBuilder + - renderedImagePushSecret + - renderedImagePushspec + type: object + buildOutputs: + description: buildOutputs is where user input options for the build + live + properties: + currentImagePullSecret: + description: currentImagePullSecret is the secret used to pull + the final produced image. must live in the openshift-machine-config-operator + namespace. the final image push and pull secrets should be separate + for security concerns. If the final image push secret is somehow + exfiltrated, that gives someone the power to push images to + the image repository. By comparison, if the final image pull + secret gets exfiltrated, that only gives someone the power to pull images + from the image repository. It's basically the principle of least + permissions. this pull secret will be used on all nodes in the + pool. These nodes will need to pull the final OS image and boot + into it using rpm-ostree or bootc. + properties: + name: + description: name is the name of the secret used to push or + pull this MachineOSConfig object. this secret must be in + the openshift-machine-config-operator namespace. + type: string + required: + - name + type: object + type: object + machineConfigPool: + description: machineConfigPool is the pool which the build is for + properties: + name: + description: name of the MachineConfigPool object. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + required: + - name + type: object + required: + - buildInputs + - machineConfigPool + type: object + status: + description: status describes the status of the machineosconfig + properties: + conditions: + description: conditions are state related conditions for the config. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition.
This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + currentImagePullspec: + description: currentImagePullspec is the fully qualified image pull + spec used by the MCO to pull down the new OSImage. This must include + sha256. + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: (self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$')) + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: (self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$')) + observedGeneration: + description: observedGeneration represents the generation observed + by the controller. this field is updated when the user changes the + configuration in BuildSettings or the MCP this object is associated + with. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_pinnedimagesets-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_pinnedimagesets-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..d8c32dc062 --- /dev/null +++ b/vendor/github.com/openshift/api/machineconfiguration/v1alpha1/zz_generated.crd-manifests/0000_80_machine-config_01_pinnedimagesets-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,165 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1713 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + labels: + openshift.io/operator-managed: "" + name: pinnedimagesets.machineconfiguration.openshift.io +spec: + group: machineconfiguration.openshift.io + names: + kind: PinnedImageSet + listKind: PinnedImageSetList + plural: pinnedimagesets + singular: pinnedimageset + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "PinnedImageSet describes a set of images that should be pinned + by CRI-O and pulled to the nodes which are members of the declared MachineConfigPools. + \n Compatibility level 4: No compatibility is provided, the API can change + at any point for any reason. These capabilities should not be used by applications + needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the configuration of this pinned image set. + properties: + pinnedImages: + description: "pinnedImages is a list of OCI Image referenced by digest + that should be pinned and pre-loaded by the nodes of a MachineConfigPool. + Translates into a new file inside the /etc/crio/crio.conf.d directory + with content similar to this: \n pinned_images = [ \"quay.io/openshift-release-dev/ocp-release@sha256:...\", + \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:...\", \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:...\", + ... ] \n These image references should all be by digest, tags aren't + allowed." + items: + properties: + name: + description: "name is an OCI Image referenced by digest. 
\n + The format of the image ref is: host[:port][/namespace]/name@sha256:" + maxLength: 447 + minLength: 1 + type: string + x-kubernetes-validations: + - message: the OCI Image reference must end with a valid '@sha256:' + suffix, where '' is 64 characters long + rule: self.split('@').size() == 2 && self.split('@')[1].matches('^sha256:[a-f0-9]{64}$') + - message: the OCI Image name should follow the host[:port][/namespace]/name + format, resembling a valid URL without the scheme + rule: self.split('@')[0].matches('^([a-zA-Z0-9-]+\\.)+[a-zA-Z0-9-]+(:[0-9]{2,5})?/([a-zA-Z0-9-_]{0,61}/)?[a-zA-Z0-9-_.]*?$') + required: + - name + type: object + maxItems: 500 + minItems: 1 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - pinnedImages + type: object + status: + description: status describes the last observed state of this pinned image + set. + properties: + conditions: + description: conditions represent the observations of a pinned image + set's current state. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..2e426af02d --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_12_etcd_01_etcds-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,277 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/752 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: etcds.operator.openshift.io +spec: + group: operator.openshift.io + names: + categories: + - coreoperators + kind: Etcd + listKind: EtcdList + plural: etcds + singular: etcd + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Etcd provides information to configure an operator to manage + etcd. \n Compatibility level 1: Stable within a major release for a minimum + of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + controlPlaneHardwareSpeed: + description: HardwareSpeed allows user to change the etcd tuning profile + which configures the latency parameters for heartbeat interval and + leader election timeouts allowing the cluster to tolerate longer + round-trip-times between etcd members. Valid values are "", "Standard" + and "Slower". "" means no opinion and the platform is left to choose + a reasonable default which is subject to change without notice. + enum: + - "" + - Standard + - Slower + type: string + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. 
This provides a mechanism + to kick a previously failed deployment and provide a reason why + you think it will work this time instead of failing again on the + same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration + that was computed by the operator. Red Hat does not support the + use of this field. Misuse of this field could lead to unexpected + behavior or conflict with other configuration options. Seek guidance + from the Red Hat support before using this field. Use of this property + blocks cluster upgrades, it must be removed before upgrading your + cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + controlPlaneHardwareSpeed: + description: ControlPlaneHardwareSpeed declares valid hardware speed + tolerance levels + enum: + - "" + - Standard + - Slower + type: string + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
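+          # Illustrative sketch: a generations entry recording the last
+          # reconciled generation of an operand; all names are hypothetical.
+          #
+          #   generations:
+          #   - group: apps
+          #     resource: deployments
+          #     namespace: openshift-etcd-operator
+          #     name: etcd-operator
+          #     lastGeneration: 7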
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + x-kubernetes-list-type: atomic + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason + for the most recent deployment + type: string + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the installer pod + of the last failed revision failed. + type: integer + lastFailedReason: + description: lastFailedReason is a machine readable failure + reason string. + type: string + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of human readable + errors during the failed deployment referenced in lastFailedRevision. + items: + type: string + type: array + x-kubernetes-list-type: atomic + lastFailedTime: + description: lastFailedTime is the time the last failed revision + failed the last time. + format: date-time + type: string + lastFallbackCount: + description: lastFallbackCount is how often a fallback to a + previous revision happened. 
+ type: integer + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + format: int32 + type: integer + required: + - nodeName + type: object + type: array + x-kubernetes-list-map-keys: + - nodeName + x-kubernetes-list-type: map + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..d4200688e4 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_80_machine-config_01_machineconfigurations-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,1293 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1453 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: machineconfigurations.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: MachineConfiguration + listKind: MachineConfigurationList + plural: machineconfigurations + singular: machineconfiguration + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "MachineConfiguration provides information to configure an operator + to manage Machine Configuration. \n Compatibility level 1: Stable within + a major release for a minimum of 12 months or 3 minor releases (whichever + is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired behavior of the + Machine Config Operator + properties: + failedRevisionLimit: + description: failedRevisionLimit is the number of failed static pod + installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + forceRedeploymentReason: + description: forceRedeploymentReason can be used to force the redeployment + of the operand by providing a unique string. 
This provides a mechanism + to kick a previously failed deployment and provide a reason why + you think it will work this time instead of failing again on the + same config. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managedBootImages: + description: managedBootImages allows configuration for the management + of boot images for machine resources within the cluster. This configuration + allows users to select resources that should be updated to the latest + boot images during cluster upgrades, ensuring that new machines + always boot with the current cluster version's boot image. When + omitted, no boot images will be updated. + properties: + machineManagers: + description: machineManagers can be used to register machine management + resources for boot image updates. The Machine Config Operator + will watch for changes to this list. Only one entry is permitted + per type of machine management resource. + items: + description: MachineManager describes a target machine resource + that is registered for boot image updates. It stores identifying + information such as the resource type and the API Group of + the resource. It also provides granular control via the selection + field. + properties: + apiGroup: + description: apiGroup is name of the APIGroup that the machine + management resource belongs to. The only current valid + value is machine.openshift.io. machine.openshift.io means + that the machine manager will only register resources + that belong to OpenShift machine API group. + enum: + - machine.openshift.io + type: string + resource: + description: resource is the machine management resource's + type. The only current valid value is machinesets. machinesets + means that the machine manager will only register resources + of the kind MachineSet. + enum: + - machinesets + type: string + selection: + description: selection allows granular control of the machine + management resources that will be registered for boot + image updates. + properties: + mode: + description: mode determines how machine managers will + be selected for updates. Valid values are All and + Partial. All means that every resource matched by + the machine manager will be updated. Partial requires + specified selector(s) and allows customisation of + which resources matched by the machine manager will + be updated. + enum: + - All + - Partial + type: string + partial: + description: partial provides label selector(s) that + can be used to match machine management resources. + Only permitted when mode is set to "Partial". + properties: + machineResourceSelector: + description: machineResourceSelector is a label + selector that can be used to select machine resources + like MachineSets. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: A label selector requirement + is a selector that contains values, a key, + and an operator that relates the key and + values. + properties: + key: + description: key is the label key that + the selector applies to. 
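+        # Illustrative sketch of the managedBootImages stanza described above:
+        # it opts a labelled subset of MachineSets into boot image updates; the
+        # label key and value are hypothetical.
+        #
+        #   managedBootImages:
+        #     machineManagers:
+        #     - apiGroup: machine.openshift.io
+        #       resource: machinesets
+        #       selection:
+        #         mode: Partial
+        #         partial:
+        #           machineResourceSelector:
+        #             matchLabels:
+        #               update-boot-image: "true"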
+ type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and + DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. + If the operator is Exists or DoesNotExist, + the values array must be empty. This + array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is + "In", and the values array contains only "value". + The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + required: + - machineResourceSelector + type: object + required: + - mode + type: object + x-kubernetes-validations: + - message: Partial is required when type is partial, and + forbidden otherwise + rule: 'has(self.mode) && self.mode == ''Partial'' ? has(self.partial) + : !has(self.partial)' + required: + - apiGroup + - resource + - selection + type: object + type: array + x-kubernetes-list-map-keys: + - resource + - apiGroup + x-kubernetes-list-type: map + type: object + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + nodeDisruptionPolicy: + description: nodeDisruptionPolicy allows an admin to set granular + node disruption actions for MachineConfig-based updates, such as + drains, service reloads, etc. Specifying this will allow for less + downtime when doing small configuration updates to the cluster. + This configuration has no effect on cluster upgrades which will + still incur node disruption where required. + properties: + files: + description: files is a list of MachineConfig file definitions + and actions to take to changes on those paths This list supports + a maximum of 50 entries. + items: + description: NodeDisruptionPolicySpecFile is a file entry and + corresponding actions to take and is used in the NodeDisruptionPolicyConfig + object + properties: + actions: + description: actions represents the series of commands to + be executed on changes to the file at the corresponding + file path. Actions will be applied in the order that they + are set in this list. If there are other incoming changes + to other MachineConfig entries in the same update that + require a reboot, the reboot will supercede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload + and None. The Reboot action and the None action cannot + be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: serviceName is the full name (e.g. + crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} + and can up to 255 characters long. ${NAME} must + be atleast 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", and + "\". 
${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer", ".snapshot", + ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: serviceName is the full name (e.g. + crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} + and can up to 255 characters long. ${NAME} must + be atleast 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", and + "\". ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer", ".snapshot", + ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: type represents the commands that will + be carried out if this NodeDisruptionPolicySpecActionType + is executed Valid value are Reboot, Drain, Reload, + Restart, DaemonReload, None and Special reload/restart + requires a corresponding service target specified + in the reload/restart field. Other values require + no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? + has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? 
size(self) == + 1 : true' + path: + description: path is the location of a file being managed + through a MachineConfig. The Actions in the policy will + apply to changes to the file at this path. + type: string + required: + - actions + - path + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - path + x-kubernetes-list-type: map + sshkey: + description: sshkey maps to the ignition.sshkeys field in the + MachineConfig object, defining an action for this will apply + to all sshkey changes in the cluster + properties: + actions: + description: actions represents the series of commands to + be executed on changes to the file at the corresponding + file path. Actions will be applied in the order that they + are set in this list. If there are other incoming changes + to other MachineConfig entries in the same update that require + a reboot, the reboot will supersede these actions. Valid + actions are Reboot, Drain, Reload, DaemonReload and None. + The Reboot action and the None action cannot be used in + conjunction with any of the other actions. This list supports + a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: serviceName is the full name (e.g. + crio.service) of the service to be reloaded. Service + names should be of the format ${NAME}${SERVICETYPE} + and can be up to 255 characters long. ${NAME} must + be at least 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", and + "\". ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer", ".snapshot", + ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, where + ${SERVICETYPE} must be one of ".service", ".socket", + ".device", ".mount", ".automount", ".swap", + ".target", ".path", ".timer",".snapshot", ".slice" + or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be at least 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: serviceName is the full name (e.g. + crio.service) of the service to be restarted. Service + names should be of the format ${NAME}${SERVICETYPE} + and can be up to 255 characters long. ${NAME} must + be at least 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", and + "\". ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer", ".snapshot", + ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, where + ${SERVICETYPE} must be one of ".service", ".socket", + ".device", ".mount", ".automount", ".swap", + ".target", ".path", ".timer",".snapshot", ".slice" + or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be at least 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: type represents the commands that will + be carried out if this NodeDisruptionPolicySpecActionType + is executed. Valid values are Reboot, Drain, Reload, + Restart, DaemonReload, and None. reload/restart + requires a corresponding service target specified + in the reload/restart field. Other values require + no further configuration. + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and forbidden + otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? has(self.restart) + : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) == + 1 : true' + - message: None action can only be specified standalone, as + it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) == + 1 : true' + required: + - actions + type: object + units: + description: units is a list of MachineConfig unit definitions and + actions to take on changes to those services. This list supports + a maximum of 50 entries. + items: + description: NodeDisruptionPolicySpecUnit is a systemd unit + name and corresponding actions to take and is used in the + NodeDisruptionPolicyConfig object + properties: + actions: + description: actions represents the series of commands to + be executed on changes to the file at the corresponding + file path. Actions will be applied in the order that they + are set in this list. If there are other incoming changes + to other MachineConfig entries in the same update that + require a reboot, the reboot will supersede these actions. + Valid actions are Reboot, Drain, Reload, DaemonReload + and None. The Reboot action and the None action cannot + be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: serviceName is the full name (e.g. + crio.service) of the service to be reloaded. + Service names should be of the format ${NAME}${SERVICETYPE} + and can be up to 255 characters long. ${NAME} must + be at least 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", and + "\". ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer", ".snapshot", + ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be at least 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: serviceName is the full name (e.g. + crio.service) of the service to be restarted. + Service names should be of the format ${NAME}${SERVICETYPE} + and can be up to 255 characters long. ${NAME} must + be at least 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", and + "\". ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer", ".snapshot", + ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} + must be at least 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: type represents the commands that will + be carried out if this NodeDisruptionPolicySpecActionType + is executed. Valid values are Reboot, Drain, Reload, + Restart, DaemonReload, and None. reload/restart + requires a corresponding service target specified + in the reload/restart field. Other values require + no further configuration. + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? has(self.reload) + : !has(self.reload)' + - message: restart is required when type is Restart, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' ? + has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) == + 1 : true' + name: + description: name represents the service name of a systemd + service managed through a MachineConfig. Actions specified + will be applied for changes to the named service.
Service + names should be of the format ${NAME}${SERVICETYPE} and + can up to 255 characters long. ${NAME} must be atleast + 1 character long and can only consist of alphabets, digits, + ":", "-", "_", ".", and "\". ${SERVICETYPE} must be one + of ".service", ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer", ".snapshot", ".slice" + or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. Expected + format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE} + must be one of ".service", ".socket", ".device", ".mount", + ".automount", ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected format + is ${NAME}${SERVICETYPE}, where {NAME} must be atleast + 1 character long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - actions + - name + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + succeededRevisionLimit: + description: succeededRevisionLimit is the number of successful static + pod installer revisions to keep on disk and in the api -1 = unlimited, + 0 or unset = 5 (default) + format: int32 + type: integer + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration + that was computed by the operator. Red Hat does not support the + use of this field. Misuse of this field could lead to unexpected + behavior or conflict with other configuration options. Seek guidance + from the Red Hat support before using this field. Use of this property + blocks cluster upgrades, it must be removed before upgrading your + cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status is the most recently observed status of the Machine + Config Operator + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. 
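[The nodeDisruptionPolicy schema above applies the same two CEL matches() rules to every systemd service name. As a rough offline check, the same patterns compile under Go's regexp package; this sketch assumes the CEL and Go semantics coincide (both engines are RE2), and validServiceName is a hypothetical helper, not code from this repository.]

    package main

    import (
        "fmt"
        "regexp"
    )

    // Patterns copied from the two CEL rules in the schema above, with the
    // YAML/CEL string escaping unwrapped. Both CEL matches() and Go's
    // regexp use RE2, so the semantics should carry over unchanged.
    var (
        serviceTypeRE = regexp.MustCompile(`\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$`)
        serviceNameRE = regexp.MustCompile(`^[a-zA-Z0-9:._\\-]+\..`)
    )

    // validServiceName mirrors the schema's maxLength: 255 bound plus the
    // two x-kubernetes-validations rules. Hypothetical helper name.
    func validServiceName(name string) bool {
        return len(name) > 0 && len(name) <= 255 &&
            serviceTypeRE.MatchString(name) &&
            serviceNameRE.MatchString(name)
    }

    func main() {
        fmt.Println(validServiceName("crio.service")) // true
        fmt.Println(validServiceName("crio.conf"))    // false: ".conf" is not a valid ${SERVICETYPE}
    }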
+ items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + x-kubernetes-list-type: atomic + latestAvailableRevision: + description: latestAvailableRevision is the deploymentID of the most + recent deployment + format: int32 + type: integer + latestAvailableRevisionReason: + description: latestAvailableRevisionReason describe the detailed reason + for the most recent deployment + type: string + nodeDisruptionPolicyStatus: + description: nodeDisruptionPolicyStatus status reflects what the latest + cluster-validated policies are, and will be used by the Machine + Config Daemon during future node updates. + properties: + clusterPolicies: + description: clusterPolicies is a merge of cluster default and + user provided node disruption policies. + properties: + files: + description: files is a list of MachineConfig file definitions + and actions to take to changes on those paths + items: + description: NodeDisruptionPolicyStatusFile is a file entry + and corresponding actions to take and is used in the NodeDisruptionPolicyClusterStatus + object + properties: + actions: + description: actions represents the series of commands + to be executed on changes to the file at the corresponding + file path. Actions will be applied in the order that + they are set in this list. If there are other incoming + changes to other MachineConfig entries in the same + update that require a reboot, the reboot will supercede + these actions. Valid actions are Reboot, Drain, Reload, + DaemonReload and None. The Reboot action and the None + action cannot be used in conjunction with any of the + other actions. This list supports a maximum of 10 + entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: serviceName is the full name + (e.g. crio.service) of the service to be + reloaded Service names should be of the + format ${NAME}${SERVICETYPE} and can up + to 255 characters long. ${NAME} must be + atleast 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", + and "\". ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer", ".snapshot", + ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". 
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to + restart, only valid if type is restart + properties: + serviceName: + description: serviceName is the full name + (e.g. crio.service) of the service to be + restarted Service names should be of the + format ${NAME}${SERVICETYPE} and can up + to 255 characters long. ${NAME} must be + atleast 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", + and "\". ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer", ".snapshot", + ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: type represents the commands that + will be carried out if this NodeDisruptionPolicyStatusActionType + is executed Valid value are Reboot, Drain, Reload, + Restart, DaemonReload, None and Special reload/restart + requires a corresponding service target specified + in the reload/restart field. Other values require + no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' + ? has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + path: + description: path is the location of a file being managed + through a MachineConfig. The Actions in the policy + will apply to changes to the file at this path. 
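[The list-level x-kubernetes-validations just above encode two invariants: a Reboot or None action must be the only entry in the list, and the list holds at most 10 entries (maxItems). A plain-Go restatement may make the ternary CEL rules easier to read; validateActionList is a hypothetical name and this is an editor's sketch, not project code.]

    package main

    import "fmt"

    // validateActionList restates the CEL list rules above:
    //   self.exists(x, x.type=='Reboot') ? size(self) == 1 : true
    //   self.exists(x, x.type=='None')   ? size(self) == 1 : true
    // plus the maxItems: 10 bound on the actions list.
    func validateActionList(types []string) error {
        if len(types) > 10 {
            return fmt.Errorf("actions supports a maximum of 10 entries, got %d", len(types))
        }
        for _, t := range types {
            if (t == "Reboot" || t == "None") && len(types) != 1 {
                return fmt.Errorf("%s action can only be specified standalone", t)
            }
        }
        return nil
    }

    func main() {
        fmt.Println(validateActionList([]string{"Reboot", "Drain"})) // error: Reboot must be standalone
        fmt.Println(validateActionList([]string{"Reload"}))          // <nil>
    }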
+ type: string + required: + - actions + - path + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - path + x-kubernetes-list-type: map + sshkey: + description: sshkey is the overall sshkey MachineConfig definition + properties: + actions: + description: actions represents the series of commands + to be executed on changes to the file at the corresponding + file path. Actions will be applied in the order that + they are set in this list. If there are other incoming + changes to other MachineConfig entries in the same update + that require a reboot, the reboot will supercede these + actions. Valid actions are Reboot, Drain, Reload, DaemonReload + and None. The Reboot action and the None action cannot + be used in conjunction with any of the other actions. + This list supports a maximum of 10 entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: serviceName is the full name (e.g. + crio.service) of the service to be reloaded + Service names should be of the format ${NAME}${SERVICETYPE} + and can up to 255 characters long. ${NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\". ${SERVICETYPE} must be one of + ".service", ".socket", ".device", ".mount", + ".automount", ".swap", ".target", ".path", + ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to restart, + only valid if type is restart + properties: + serviceName: + description: serviceName is the full name (e.g. + crio.service) of the service to be restarted + Service names should be of the format ${NAME}${SERVICETYPE} + and can up to 255 characters long. ${NAME} + must be atleast 1 character long and can only + consist of alphabets, digits, ":", "-", "_", + ".", and "\". ${SERVICETYPE} must be one of + ".service", ".socket", ".device", ".mount", + ".automount", ".swap", ".target", ".path", + ".timer", ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. 
+ Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: type represents the commands that will + be carried out if this NodeDisruptionPolicyStatusActionType + is executed Valid value are Reboot, Drain, Reload, + Restart, DaemonReload, None and Special reload/restart + requires a corresponding service target specified + in the reload/restart field. Other values require + no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, and + forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' ? + has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + required: + - actions + type: object + units: + description: units is a list MachineConfig unit definitions + and actions to take on changes to those services + items: + description: NodeDisruptionPolicyStatusUnit is a systemd + unit name and corresponding actions to take and is used + in the NodeDisruptionPolicyClusterStatus object + properties: + actions: + description: actions represents the series of commands + to be executed on changes to the file at the corresponding + file path. Actions will be applied in the order that + they are set in this list. If there are other incoming + changes to other MachineConfig entries in the same + update that require a reboot, the reboot will supercede + these actions. Valid actions are Reboot, Drain, Reload, + DaemonReload and None. The Reboot action and the None + action cannot be used in conjunction with any of the + other actions. This list supports a maximum of 10 + entries. + items: + properties: + reload: + description: reload specifies the service to reload, + only valid if type is reload + properties: + serviceName: + description: serviceName is the full name + (e.g. crio.service) of the service to be + reloaded Service names should be of the + format ${NAME}${SERVICETYPE} and can up + to 255 characters long. ${NAME} must be + atleast 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", + and "\". ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer", ".snapshot", + ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". 
+ rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + restart: + description: restart specifies the service to + restart, only valid if type is restart + properties: + serviceName: + description: serviceName is the full name + (e.g. crio.service) of the service to be + restarted Service names should be of the + format ${NAME}${SERVICETYPE} and can up + to 255 characters long. ${NAME} must be + atleast 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", + and "\". ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer", ".snapshot", + ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service + name. Expected format is ${NAME}${SERVICETYPE}, + where ${SERVICETYPE} must be one of ".service", + ".socket", ".device", ".mount", ".automount", + ".swap", ".target", ".path", ".timer",".snapshot", + ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. + Expected format is ${NAME}${SERVICETYPE}, + where {NAME} must be atleast 1 character + long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - serviceName + type: object + type: + description: type represents the commands that + will be carried out if this NodeDisruptionPolicyStatusActionType + is executed Valid value are Reboot, Drain, Reload, + Restart, DaemonReload, None and Special reload/restart + requires a corresponding service target specified + in the reload/restart field. Other values require + no further configuration + enum: + - Reboot + - Drain + - Reload + - Restart + - DaemonReload + - None + - Special + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: reload is required when type is Reload, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Reload'' + ? has(self.reload) : !has(self.reload)' + - message: restart is required when type is Restart, + and forbidden otherwise + rule: 'has(self.type) && self.type == ''Restart'' + ? has(self.restart) : !has(self.restart)' + maxItems: 10 + type: array + x-kubernetes-list-type: atomic + x-kubernetes-validations: + - message: Reboot action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''Reboot'') ? size(self) + == 1 : true' + - message: None action can only be specified standalone, + as it will override any other actions + rule: 'self.exists(x, x.type==''None'') ? size(self) + == 1 : true' + name: + description: name represents the service name of a systemd + service managed through a MachineConfig Actions specified + will be applied for changes to the named service. + Service names should be of the format ${NAME}${SERVICETYPE} + and can up to 255 characters long. ${NAME} must be + atleast 1 character long and can only consist of alphabets, + digits, ":", "-", "_", ".", and "\". 
${SERVICETYPE} + must be one of ".service", ".socket", ".device", ".mount", + ".automount", ".swap", ".target", ".path", ".timer", + ".snapshot", ".slice" or ".scope". + maxLength: 255 + type: string + x-kubernetes-validations: + - message: Invalid ${SERVICETYPE} in service name. Expected + format is ${NAME}${SERVICETYPE}, where ${SERVICETYPE} + must be one of ".service", ".socket", ".device", + ".mount", ".automount", ".swap", ".target", ".path", + ".timer",".snapshot", ".slice" or ".scope". + rule: self.matches('\\.(service|socket|device|mount|automount|swap|target|path|timer|snapshot|slice|scope)$') + - message: Invalid ${NAME} in service name. Expected + format is ${NAME}${SERVICETYPE}, where {NAME} must + be atleast 1 character long and can only consist + of alphabets, digits, ":", "-", "_", ".", and "\" + rule: self.matches('^[a-zA-Z0-9:._\\\\-]+\\..') + required: + - actions + - name + type: object + maxItems: 100 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: object + nodeStatuses: + description: nodeStatuses track the deployment values and errors across + individual nodes + items: + description: NodeStatus provides information about the current state + of a particular node managed by this operator. + properties: + currentRevision: + description: currentRevision is the generation of the most recently + successful deployment + format: int32 + type: integer + lastFailedCount: + description: lastFailedCount is how often the installer pod + of the last failed revision failed. + type: integer + lastFailedReason: + description: lastFailedReason is a machine readable failure + reason string. + type: string + lastFailedRevision: + description: lastFailedRevision is the generation of the deployment + we tried and failed to deploy. + format: int32 + type: integer + lastFailedRevisionErrors: + description: lastFailedRevisionErrors is a list of human readable + errors during the failed deployment referenced in lastFailedRevision. + items: + type: string + type: array + x-kubernetes-list-type: atomic + lastFailedTime: + description: lastFailedTime is the time the last failed revision + failed the last time. + format: date-time + type: string + lastFallbackCount: + description: lastFallbackCount is how often a fallback to a + previous revision happened. 
+ type: integer + nodeName: + description: nodeName is the name of the node + type: string + targetRevision: + description: targetRevision is the generation of the deployment + we're trying to apply + format: int32 + type: integer + required: + - nodeName + type: object + type: array + x-kubernetes-list-map-keys: + - nodeName + x-kubernetes-list-type: map + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..51728c8f5d --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.crd-manifests/0000_90_csi-driver_01_clustercsidrivers-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,408 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/701 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: clustercsidrivers.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: ClusterCSIDriver + listKind: ClusterCSIDriverList + plural: clustercsidrivers + singular: clustercsidriver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ClusterCSIDriver object allows management and configuration + of a CSI driver operator installed by default in OpenShift. Name of the + object must be name of the CSI driver it operates. See CSIDriverName type + for list of allowed values. \n Compatibility level 1: Stable within a major + release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + properties: + name: + enum: + - ebs.csi.aws.com + - efs.csi.aws.com + - disk.csi.azure.com + - file.csi.azure.com + - filestore.csi.storage.gke.io + - pd.csi.storage.gke.io + - cinder.csi.openstack.org + - csi.vsphere.vmware.com + - manila.csi.openstack.org + - csi.ovirt.org + - csi.kubevirt.io + - csi.sharedresource.openshift.io + - diskplugin.csi.alibabacloud.com + - vpc.block.csi.ibm.io + - powervs.csi.ibm.com + - secrets-store.csi.k8s.io + - smb.csi.k8s.io + type: string + type: object + spec: + description: spec holds user settable values for configuration + properties: + driverConfig: + description: driverConfig can be used to specify platform specific + driver configuration. When omitted, this means no opinion and the + platform is left to choose reasonable defaults. These defaults are + subject to change over time. + properties: + aws: + description: aws is used to configure the AWS CSI driver. + properties: + kmsKeyARN: + description: kmsKeyARN sets the cluster default storage class + to encrypt volumes with a user-defined KMS key, rather than + the default KMS key used by AWS. The value may be either + the ARN or Alias ARN of a KMS key. + pattern: ^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b|aws-iso-e|aws-iso-f):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$ + type: string + type: object + azure: + description: azure is used to configure the Azure CSI driver. + properties: + diskEncryptionSet: + description: diskEncryptionSet sets the cluster default storage + class to encrypt volumes with a customer-managed encryption + set, rather than the default platform-managed keys. + properties: + name: + description: name is the name of the disk encryption set + that will be set on the default storage class. The value + should consist of only alphanumberic characters, underscores + (_), hyphens, and be at most 80 characters in length. + maxLength: 80 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + resourceGroup: + description: resourceGroup defines the Azure resource + group that contains the disk encryption set. The value + should consist of only alphanumberic characters, underscores + (_), parentheses, hyphens and periods. The value should + not end in a period and be at most 90 characters in + length. + maxLength: 90 + pattern: ^[\w\.\-\(\)]*[\w\-\(\)]$ + type: string + subscriptionID: + description: 'subscriptionID defines the Azure subscription + that contains the disk encryption set. The value should + meet the following conditions: 1. It should be a 128-bit + number. 2. It should be 36 characters (32 hexadecimal + characters and 4 hyphens) long. 3. It should be displayed + in five groups separated by hyphens (-). 4. The first + group should be 8 characters long. 5. The second, third, + and fourth groups should be 4 characters long. 6. The + fifth group should be 12 characters long. An Example + SubscrionID: f2007bbf-f802-4a47-9336-cf7c6b89b378' + maxLength: 36 + pattern: ^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$ + type: string + required: + - name + - resourceGroup + - subscriptionID + type: object + type: object + driverType: + description: 'driverType indicates type of CSI driver for which + the driverConfig is being applied to. Valid values are: AWS, + Azure, GCP, IBMCloud, vSphere and omitted. Consumers should + treat unknown values as a NO-OP.' 
+ enum: + - "" + - AWS + - Azure + - GCP + - IBMCloud + - vSphere + type: string + gcp: + description: gcp is used to configure the GCP CSI driver. + properties: + kmsKey: + description: kmsKey sets the cluster default storage class + to encrypt volumes with customer-supplied encryption keys, + rather than the default keys managed by GCP. + properties: + keyRing: + description: keyRing is the name of the KMS Key Ring which + the KMS Key belongs to. The value should correspond + to an existing KMS key ring and should consist of only + alphanumeric characters, hyphens (-) and underscores + (_), and be at most 63 characters in length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + location: + description: location is the GCP location in which the + Key Ring exists. The value must match an existing GCP + location, or "global". Defaults to global, if not set. + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + name: + description: name is the name of the customer-managed + encryption key to be used for disk encryption. The value + should correspond to an existing KMS key and should + consist of only alphanumeric characters, hyphens (-) + and underscores (_), and be at most 63 characters in + length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + projectID: + description: projectID is the ID of the Project in which + the KMS Key Ring exists. It must be 6 to 30 lowercase + letters, digits, or hyphens. It must start with a letter. + Trailing hyphens are prohibited. + maxLength: 30 + minLength: 6 + pattern: ^[a-z][a-z0-9-]+[a-z0-9]$ + type: string + required: + - keyRing + - name + - projectID + type: object + type: object + ibmcloud: + description: ibmcloud is used to configure the IBM Cloud CSI driver. + properties: + encryptionKeyCRN: + description: encryptionKeyCRN is the IBM Cloud CRN of the + customer-managed root key to use for disk encryption of + volumes for the default storage classes. + maxLength: 154 + minLength: 144 + pattern: ^crn:v[0-9]+:bluemix:(public|private):(kms|hs-crypto):[a-z-]+:a/[0-9a-f]+:[0-9a-f-]{36}:key:[0-9a-f-]{36}$ + type: string + required: + - encryptionKeyCRN + type: object + vSphere: + description: vsphere is used to configure the vsphere CSI driver. + properties: + globalMaxSnapshotsPerBlockVolume: + description: 'globalMaxSnapshotsPerBlockVolume is a global + configuration parameter that applies to volumes on all kinds + of datastores. If omitted, the platform chooses a default, + which is subject to change over time, currently that default + is 3. Snapshots can not be disabled using this parameter. + Increasing number of snapshots above 3 can have negative + impact on performance, for more details see: https://kb.vmware.com/s/article/1025279 + Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html' + format: int32 + maximum: 32 + minimum: 1 + type: integer + granularMaxSnapshotsPerBlockVolumeInVSAN: + description: granularMaxSnapshotsPerBlockVolumeInVSAN is a + granular configuration parameter on vSAN datastore only. + It overrides GlobalMaxSnapshotsPerBlockVolume if set, while + it falls back to the global constraint if unset. Snapshots + for VSAN can not be disabled using this parameter. 
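[Several driverConfig fields above are pattern-validated rather than enum-validated. Compiled with Go's RE2 engine, the patterns (copied from the schema) can be exercised directly; the AWS account ID below is illustrative, while the subscription ID is the sample given in the field description itself.]

    package main

    import (
        "fmt"
        "regexp"
    )

    var (
        // aws.kmsKeyARN: ARN or Alias ARN of a user-defined KMS key.
        awsKMSKeyARN = regexp.MustCompile(`^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b|aws-iso-e|aws-iso-f):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$`)
        // azure.diskEncryptionSet.subscriptionID: 8-4-4-4-12 hex groups.
        azureSubscriptionID = regexp.MustCompile(`^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$`)
    )

    func main() {
        fmt.Println(azureSubscriptionID.MatchString("f2007bbf-f802-4a47-9336-cf7c6b89b378")) // true
        fmt.Println(awsKMSKeyARN.MatchString("arn:aws:kms:us-east-1:123456789012:key/abc"))  // true (account ID is illustrative)
    }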
+ format: int32 + maximum: 32 + minimum: 1 + type: integer + granularMaxSnapshotsPerBlockVolumeInVVOL: + description: granularMaxSnapshotsPerBlockVolumeInVVOL is a + granular configuration parameter on Virtual Volumes datastore + only. It overrides GlobalMaxSnapshotsPerBlockVolume if set, + while it falls back to the global constraint if unset. Snapshots + for VVOL can not be disabled using this parameter. + format: int32 + maximum: 32 + minimum: 1 + type: integer + topologyCategories: + description: topologyCategories indicates tag categories with + which vcenter resources such as hostcluster or datacenter + were tagged with. If cluster Infrastructure object has a + topology, values specified in Infrastructure object will + be used and modifications to topologyCategories will be + rejected. + items: + type: string + type: array + type: object + required: + - driverType + type: object + x-kubernetes-validations: + - message: ibmcloud must be set if driverType is 'IBMCloud', but remain + unset otherwise + rule: 'has(self.driverType) && self.driverType == ''IBMCloud'' ? + has(self.ibmcloud) : !has(self.ibmcloud)' + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + storageClassState: + description: StorageClassState determines if CSI operator should create + and manage storage classes. If this field value is empty or Managed + - CSI operator will continuously reconcile storage class and create + if necessary. If this field value is Unmanaged - CSI operator will + not reconcile any previously created storage class. If this field + value is Removed - CSI operator will delete the storage class it + created previously. When omitted, this means the user has no opinion + and the platform chooses a reasonable default, which is subject + to change over time. The current default behaviour is Managed. + enum: + - "" + - Managed + - Unmanaged + - Removed + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration + that was computed by the operator. Red Hat does not support the + use of this field. Misuse of this field could lead to unexpected + behavior or conflict with other configuration options. Seek guidance + from the Red Hat support before using this field. 
Use of this property + blocks cluster upgrades, it must be removed before upgrading your + cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + x-kubernetes-list-type: atomic + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..45b1a9d5bb --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_etcd_01_etcdbackups-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,158 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1482 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: etcdbackups.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: EtcdBackup + listKind: EtcdBackupList + plural: etcdbackups + singular: etcdbackup + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "# EtcdBackup provides configuration options and status for a + one-time backup attempt of the 
etcd cluster \n Compatibility level 4: No + compatibility is provided, the API can change at any point for any reason. + These capabilities should not be used by applications needing long term + support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + pvcName: + description: PVCName specifies the name of the PersistentVolumeClaim + (PVC) which binds a PersistentVolume where the etcd backup file + would be saved The PVC itself must always be created in the "openshift-etcd" + namespace If the PVC is left unspecified "" then the platform will + choose a reasonable default location to save the backup. In the + future this would be backups saved across the control-plane master + nodes. + type: string + x-kubernetes-validations: + - message: pvcName is immutable once set + rule: self == oldSelf + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + backupJob: + description: backupJob is the reference to the Job that executes the + backup. Optional + properties: + name: + description: name is the name of the Job. Required + type: string + namespace: + description: namespace is the namespace of the Job. this is always + expected to be "openshift-etcd" since the user provided PVC + is also required to be in "openshift-etcd" Required + pattern: ^openshift-etcd$ + type: string + required: + - name + - namespace + type: object + conditions: + description: conditions provide details on the status of the etcd + backup job. + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. 
For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-DevPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-DevPreviewNoUpgrade.crd.yaml new file mode 100644 index 0000000000..8a18f4bc37 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.crd-manifests/0000_10_operator-lifecycle-manager_01_olms-DevPreviewNoUpgrade.crd.yaml @@ -0,0 +1,179 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1504 + api.openshift.io/merged-by-featuregates: "true" + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + release.openshift.io/feature-set: DevPreviewNoUpgrade + name: olms.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: OLM + listKind: OLMList + plural: olms + singular: olm + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "OLM provides information to configure an operator to manage + the OLM controllers \n Compatibility level 4: No compatibility is provided, + the API can change at any point for any reason. These capabilities should + not be used by applications needing long term support." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. + \ It does not give fine grained control, but it is a simple way + to manage coarse grained logging choices that operators have to + interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", + \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + managementState: + description: managementState indicates whether and how the operator + should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + observedConfig: + description: observedConfig holds a sparse config that controller + has observed from the cluster state. It exists in spec because + it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the + operator itself. It does not give fine grained control, but it + is a simple way to manage coarse grained logging choices that operators + have to interpret for themselves. \n Valid values are: \"Normal\", + \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration + that was computed by the operator. Red Hat does not support the + use of this field. Misuse of this field could lead to unexpected + behavior or conflict with other configuration options. Seek guidance + from the Red Hat support before using this field. Use of this property + blocks cluster upgrades, it must be removed before upgrading your + cluster. + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + conditions: + description: conditions is a list of conditions and their status + items: + description: OperatorCondition is just the standard condition fields. + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + generations: + description: generations are used to determine when an item needs + to be reconciled or has changed in a way that needs a reaction. + items: + description: GenerationStatus keeps track of the generation for + a given resource so that decisions about forced updates can be + made. 
+ properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without + generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload + controller involved + format: int64 + type: integer + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're + tracking + type: string + type: object + type: array + x-kubernetes-list-type: atomic + observedGeneration: + description: observedGeneration is the last generation change you've + dealt with + format: int64 + type: integer + readyReplicas: + description: readyReplicas indicates how many replicas are ready and + at the desired state + format: int32 + type: integer + version: + description: version is the level this availability applies to + type: string + type: object + required: + - metadata + - spec + type: object + x-kubernetes-validations: + - message: olm is a singleton, .metadata.name must be 'cluster' + rule: self.metadata.name == 'cluster' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/modules.txt b/vendor/modules.txt index aa809e439f..eb7a385339 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -818,7 +818,7 @@ github.com/opencontainers/runc/libcontainer/user # github.com/opencontainers/runtime-spec v1.1.0 ## explicit github.com/opencontainers/runtime-spec/specs-go -# github.com/openshift/api v0.0.0-20240416170644-3efee4252217 +# github.com/openshift/api v0.0.0-20240422085825-2624175e9673 ## explicit; go 1.21 github.com/openshift/api github.com/openshift/api/annotations @@ -898,7 +898,7 @@ github.com/openshift/api/template github.com/openshift/api/template/v1 github.com/openshift/api/user github.com/openshift/api/user/v1 -# github.com/openshift/client-go v0.0.0-20240415214935-be70f772f157 +# github.com/openshift/client-go v0.0.0-20240422164335-6c851f4919dd ## explicit; go 1.21 github.com/openshift/client-go/build/applyconfigurations/build/v1 github.com/openshift/client-go/build/applyconfigurations/internal From baa37aec8cb536b010393f59a0d591a8e3025d88 Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Tue, 23 Apr 2024 18:02:20 -0400 Subject: [PATCH 14/18] *: add MCP PoolSynchronizersStatus support for pinned image sets Signed-off-by: Sam Batschelet --- cmd/machine-config-controller/start.go | 2 +- cmd/machine-config-daemon/start.go | 2 +- pkg/controller/node/status.go | 25 ++++++++ pkg/daemon/pinned_image_set.go | 80 +++++++++++++++----------- 4 files changed, 74 insertions(+), 35 deletions(-) diff --git a/cmd/machine-config-controller/start.go b/cmd/machine-config-controller/start.go index 1460a36623..2cf90a31cd 100644 --- a/cmd/machine-config-controller/start.go +++ b/cmd/machine-config-controller/start.go @@ -105,7 +105,7 @@ func runStartCmd(_ *cobra.Command, _ []string) { enabled, disabled := getEnabledDisabledFeatures(features) klog.Infof("FeatureGates initialized: enabled=%v disabled=%v", enabled, disabled) - if features.Enabled(configv1.FeatureGatePinnedImages) { + if features.Enabled(configv1.FeatureGatePinnedImages) && features.Enabled(configv1.FeatureGateMachineConfigNodes) { pinnedImageSet := pinnedimageset.New( 
ctrlctx.InformerFactory.Machineconfiguration().V1alpha1().PinnedImageSets(), ctrlctx.InformerFactory.Machineconfiguration().V1().MachineConfigPools(), diff --git a/cmd/machine-config-daemon/start.go b/cmd/machine-config-daemon/start.go index 164a3d6276..5cff83bc88 100644 --- a/cmd/machine-config-daemon/start.go +++ b/cmd/machine-config-daemon/start.go @@ -212,7 +212,7 @@ func runStartCmd(_ *cobra.Command, _ []string) { klog.Fatalf("Could not get FG: %v", err) } else { klog.Infof("FeatureGates initialized: knownFeatureGates=%v", featureGates.KnownFeatures()) - if featureGates.Enabled(configv1.FeatureGatePinnedImages) { + if featureGates.Enabled(configv1.FeatureGatePinnedImages) && featureGates.Enabled(configv1.FeatureGateMachineConfigNodes) { klog.Infof("Feature enabled: %s", configv1.FeatureGatePinnedImages) criClient, err := cri.NewClient(ctx, constants.DefaultCRIOSocketPath) if err != nil { diff --git a/pkg/controller/node/status.go b/pkg/controller/node/status.go index 91445c7a67..1ce9ae23cb 100644 --- a/pkg/controller/node/status.go +++ b/pkg/controller/node/status.go @@ -90,6 +90,7 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf machineCount := int32(len(nodes)) var degradedMachines, readyMachines, updatedMachines, unavailableMachines, updatingMachines []*corev1.Node + var updatingPinnedImageSetMachines int32 // if we represent updating properly here, we will also represent updating properly in the CO // so this solves the cordoning RFE and the upgradeable RFE // updating == updatePrepared, updateExecuted, updatedComplete, postAction, cordoning, draining @@ -113,11 +114,18 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf // not ready yet break } + pinnedImageSetsUpdating := false for _, cond := range state.Status.Conditions { if strings.Contains(cond.Message, "Error:") { degradedMachines = append(degradedMachines, ourNode) continue } + if fg.Enabled(configv1.FeatureGatePinnedImages) && cond.Status == metav1.ConditionTrue { + if mcfgalphav1.StateProgress(cond.Type) == mcfgalphav1.MachineConfigNodePinnedImageSetsProgressing || + mcfgalphav1.StateProgress(cond.Type) == mcfgalphav1.MachineConfigNodePinnedImageSetsDegraded { + pinnedImageSetsUpdating = true + } + } if cond.Status == metav1.ConditionUnknown { switch mcfgalphav1.StateProgress(cond.Type) { case mcfgalphav1.MachineConfigNodeUpdatePrepared: @@ -145,6 +153,9 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf } } } + if pinnedImageSetsUpdating { + updatingPinnedImageSetMachines++ + } } degradedMachineCount := int32(len(degradedMachines)) updatedMachineCount := int32(len(updatedMachines)) @@ -184,6 +195,20 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf DegradedMachineCount: degradedMachineCount, CertExpirys: certExpirys, } + + // update synchronizer status for pinned image sets + if fg.Enabled(configv1.FeatureGatePinnedImages) { + status.PoolSynchronizersStatus = []mcfgv1.PoolSynchronizerStatus{ + { + PoolSynchronizerType: mcfgv1.PinnedImageSets, + MachineCount: int64(machineCount), + UpdatedMachineCount: int64(machineCount - updatingPinnedImageSetMachines), + ReadyMachineCount: int64(readyMachineCount), + UnavailableMachineCount: int64(unavailableMachineCount), + }, + } + } + status.Configuration = pool.Status.Configuration conditions := pool.Status.Conditions diff --git a/pkg/daemon/pinned_image_set.go b/pkg/daemon/pinned_image_set.go index 14993563dd..8b6143f8cf 100644 --- 
a/pkg/daemon/pinned_image_set.go +++ b/pkg/daemon/pinned_image_set.go @@ -70,13 +70,17 @@ const ( // controller configuration maxRetriesController = 15 + + // degrade the pool after max retry threshold + degradeAfterRetries = maxRetriesController + // MCN looks for conditions with this prefix; if seen, the pool will be degraded + degradeMessagePrefix = "Error:" ) var ( errInsufficientStorage = errors.New("storage available is less than minimum required") errFailedToPullImage = errors.New("failed to pull image") errNotFound = errors.New("not found") - errNoAuthForImage = errors.New("no auth found for image") ) // PinnedImageSetManager manages the prefetching of images. @@ -190,7 +194,6 @@ func NewPinnedImageSetManager( } func (p *PinnedImageSetManager) sync(key string) error { - klog.V(4).Infof("Syncing PinnedImageSet for pool: %s", key) node, err := p.nodeLister.Get(p.nodeName) if err != nil { return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) } @@ -208,17 +211,18 @@ func (p *PinnedImageSetManager) sync(key string) error { ctx, cancel := context.WithTimeout(context.Background(), p.prefetchTimeout) // cancel any currently running tasks in the worker pool p.resetWorkload(cancel) + p.updateStatusProgressing() if err := p.syncMachineConfigPools(ctx, pools); err != nil { if errors.Is(err, context.DeadlineExceeded) { ctxErr := fmt.Errorf("requeue: prefetching images incomplete after: %v", p.prefetchTimeout) - p.updateStatusError(pools, ctxErr) + p.updateStatusError(key, pools, ctxErr) klog.Info(ctxErr) return ctxErr } - klog.Errorf("Failed to prefetch and pin images: %v", err) - p.updateStatusError(pools, err) + klog.Error(err) + p.updateStatusError(key, pools, err) return err } @@ -496,6 +500,33 @@ func ensureCrioPinnedImagesConfigFile(path string, imageNames []string) error { return nil } +func (p *PinnedImageSetManager) updateStatusProgressing() { + node, err := p.nodeLister.Get(p.nodeName) + if err != nil { + klog.Errorf("Failed to get node %q: %v", p.nodeName, err) + return + } + + err = upgrademonitor.UpdateMachineConfigNodeStatus( + &upgrademonitor.Condition{ + State: mcfgv1alpha1.MachineConfigNodePinnedImageSetsProgressing, + Reason: "ImagePrefetch", + Message: fmt.Sprintf("node is prefetching images: %s", node.Name), + }, + nil, + metav1.ConditionTrue, + metav1.ConditionUnknown, + node, + p.mcfgClient, + nil, + nil, + p.featureGatesAccessor, + ) + if err != nil { + klog.Errorf("Failed to update machine config node: %v", err) + } +} + func (p *PinnedImageSetManager) updateStatusProgressingComplete(pools []*mcfgv1.MachineConfigPool, message string) { node, err := p.nodeLister.Get(p.nodeName) if err != nil { @@ -540,8 +571,8 @@ func (p *PinnedImageSetManager) updateStatusProgressingComplete(pools []*mcfgv1. metav1.ConditionUnknown, node, p.mcfgClient, - applyCfg, - imageSets, + nil, + nil, p.featureGatesAccessor, ) if err != nil { @@ -549,49 +580,32 @@ 
} } -func (p *PinnedImageSetManager) updateStatusError(pools []*mcfgv1.MachineConfigPool, statusErr error) { +func (p *PinnedImageSetManager) updateStatusError(key interface{}, pools []*mcfgv1.MachineConfigPool, statusErr error) { node, err := p.nodeLister.Get(p.nodeName) if err != nil { klog.Errorf("Failed to get node %q: %v", p.nodeName, err) return } - applyCfg, imageSets, err := getImageSetMetadata(p.imageSetLister, pools, nil) + applyCfg, imageSets, err := getImageSetMetadata(p.imageSetLister, pools, statusErr) if err != nil { klog.Errorf("Failed to get image set apply configs: %v", err) return } - err = upgrademonitor.UpdateMachineConfigNodeStatus( - &upgrademonitor.Condition{ - State: mcfgv1alpha1.MachineConfigNodePinnedImageSetsProgressing, - Reason: "ImagePrefetch", - Message: fmt.Sprintf("node is prefetching images: %s", node.Name), - }, - nil, - metav1.ConditionTrue, - metav1.ConditionUnknown, - node, - p.mcfgClient, - applyCfg, - imageSets, - p.featureGatesAccessor, - ) - if err != nil { - klog.Errorf("Failed to updated machine config node: %v", err) - } - - applyCfg, imageSets, err = getImageSetMetadata(p.imageSetLister, pools, statusErr) - if err != nil { - klog.Errorf("Failed to get image set apply configs: %v", err) - return + var errMsg string + if p.queue.NumRequeues(key) >= degradeAfterRetries { + // degrade the pool after max retries + errMsg = fmt.Sprintf("%s %v", degradeMessagePrefix, statusErr) + } else { + errMsg = statusErr.Error() } err = upgrademonitor.UpdateMachineConfigNodeStatus( &upgrademonitor.Condition{ State: mcfgv1alpha1.MachineConfigNodePinnedImageSetsDegraded, Reason: "PrefetchFailed", - Message: "Error: " + statusErr.Error(), + Message: errMsg, }, nil, metav1.ConditionTrue, From 6ce48d6b219a1106af209aebff6fa7f0051d0b50 Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Thu, 25 Apr 2024 08:34:25 -0400 Subject: [PATCH 15/18] daemon/pinnedimageset: ensure pool is degraded if out of space Signed-off-by: Sam Batschelet --- pkg/controller/node/status.go | 39 ++- .../pinnedimageset/pinned_image_set.go | 2 +- pkg/daemon/constants/constants.go | 2 +- pkg/daemon/pinned_image_set.go | 222 +++++++++++------- pkg/daemon/pinned_image_set_test.go | 127 +++++++++- 5 files changed, 286 insertions(+), 106 deletions(-) diff --git a/pkg/controller/node/status.go b/pkg/controller/node/status.go index 1ce9ae23cb..c2f3a97204 100644 --- a/pkg/controller/node/status.go +++ b/pkg/controller/node/status.go @@ -88,9 +88,11 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf } } machineCount := int32(len(nodes)) + updatingPinnedImageSetMachines := int32(0) var degradedMachines, readyMachines, updatedMachines, unavailableMachines, updatingMachines []*corev1.Node - var updatingPinnedImageSetMachines int32 + degradedReasons := []string{} + // if we represent updating properly here, we will also represent updating properly in the CO // so this solves the cordoning RFE and the upgradeable RFE // updating == updatePrepared, updateExecuted, updatedComplete, postAction, cordoning, draining @@ -114,18 +116,23 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf // not ready yet break } - pinnedImageSetsUpdating := false + if fg.Enabled(configv1.FeatureGatePinnedImages) { + if isPinnedImageSetNodeUpdating(state) { + updatingPinnedImageSetMachines++ + } + } for _, cond := range state.Status.Conditions { if strings.Contains(cond.Message, "Error:") { degradedMachines = append(degradedMachines, ourNode) - continue - } - if 
fg.Enabled(configv1.FeatureGatePinnedImages) && cond.Status == metav1.ConditionTrue { - if mcfgalphav1.StateProgress(cond.Type) == mcfgalphav1.MachineConfigNodePinnedImageSetsProgressing || - mcfgalphav1.StateProgress(cond.Type) == mcfgalphav1.MachineConfigNodePinnedImageSetsDegraded { - pinnedImageSetsUpdating = true + // populate the degradedReasons from the MachineConfigNodePinnedImageSetsDegraded condition + if fg.Enabled(configv1.FeatureGatePinnedImages) { + if mcfgalphav1.StateProgress(cond.Type) == mcfgalphav1.MachineConfigNodePinnedImageSetsDegraded && cond.Status == metav1.ConditionTrue { + degradedReasons = append(degradedReasons, fmt.Sprintf("Node %s is reporting: %q", ourNode.Name, cond.Message)) + } } + continue } + if cond.Status == metav1.ConditionUnknown { switch mcfgalphav1.StateProgress(cond.Type) { case mcfgalphav1.MachineConfigNodeUpdatePrepared: @@ -153,9 +160,6 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf } } } - if pinnedImageSetsUpdating { - updatingPinnedImageSetMachines++ - } } degradedMachineCount := int32(len(degradedMachines)) updatedMachineCount := int32(len(updatedMachines)) @@ -178,7 +182,6 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf degradedMachineCount = int32(len(degradedMachines)) } - degradedReasons := []string{} for _, n := range degradedMachines { reason, ok := n.Annotations[daemonconsts.MachineConfigDaemonReasonAnnotationKey] if ok && reason != "" { @@ -198,6 +201,7 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf // update synchronizer status for pinned image sets if fg.Enabled(configv1.FeatureGatePinnedImages) { + // TODO: update counts to be more granular status.PoolSynchronizersStatus = []mcfgv1.PoolSynchronizerStatus{ { PoolSynchronizerType: mcfgv1.PinnedImageSets, @@ -205,6 +209,7 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf UpdatedMachineCount: int64(machineCount - updatingPinnedImageSetMachines), ReadyMachineCount: int64(readyMachineCount), UnavailableMachineCount: int64(unavailableMachineCount), + AvailableMachineCount: int64(machineCount - unavailableMachineCount), }, } } @@ -279,6 +284,16 @@ func calculateStatus(fg featuregates.FeatureGate, mcs []*mcfgalphav1.MachineConf return status } +func isPinnedImageSetNodeUpdating(mcs *mcfgalphav1.MachineConfigNode) bool { + var updating int32 + for _, set := range mcs.Status.PinnedImageSets { + if set.CurrentGeneration != set.DesiredGeneration { + updating++ + } + } + return updating > 0 +} + func getPoolUpdateLine(pool *mcfgv1.MachineConfigPool) string { targetConfig := pool.Spec.Configuration.Name mcLine := fmt.Sprintf("MachineConfig %s", targetConfig) diff --git a/pkg/controller/pinnedimageset/pinned_image_set.go b/pkg/controller/pinnedimageset/pinned_image_set.go index 8615337786..53fa203803 100644 --- a/pkg/controller/pinnedimageset/pinned_image_set.go +++ b/pkg/controller/pinnedimageset/pinned_image_set.go @@ -191,7 +191,7 @@ func triggerPinnedImageSetChange(old, new *mcfgv1alpha1.PinnedImageSet) bool { if old.DeletionTimestamp != new.DeletionTimestamp { return true } - if !reflect.DeepEqual(old.Spec, new.Spec) { + if !reflect.DeepEqual(old, new) { return true } return false diff --git a/pkg/daemon/constants/constants.go b/pkg/daemon/constants/constants.go index 4cda3bc7b5..264da4a545 100644 --- a/pkg/daemon/constants/constants.go +++ b/pkg/daemon/constants/constants.go @@ -111,7 +111,7 @@ const ( // DaemonReloadCommand is used to specify 
reloads and restarts of the systemd manager configuration DaemonReloadCommand = "daemon-reload" - + // DefaultCRIOSocketPath is the default path to the CRI-O socket DefaultCRIOSocketPath = "/var/run/crio/crio.sock" diff --git a/pkg/daemon/pinned_image_set.go b/pkg/daemon/pinned_image_set.go index 8b6143f8cf..a188bf8fca 100644 --- a/pkg/daemon/pinned_image_set.go +++ b/pkg/daemon/pinned_image_set.go @@ -71,8 +71,6 @@ const ( // controller configuration maxRetriesController = 15 - // degrade the pool after max retry threshold - degradeAfterRetries = maxRetriesController // MCN looks for conditions with this prefix; if seen, the pool will be degraded degradeMessagePrefix = "Error:" ) @@ -194,6 +192,7 @@ func NewPinnedImageSetManager( } func (p *PinnedImageSetManager) sync(key string) error { + klog.V(4).Infof("Syncing MachineConfigPool %q", key) node, err := p.nodeLister.Get(p.nodeName) if err != nil { return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) @@ -211,23 +210,28 @@ func (p *PinnedImageSetManager) sync(key string) error { ctx, cancel := context.WithTimeout(context.Background(), p.prefetchTimeout) // cancel any currently running tasks in the worker pool p.resetWorkload(cancel) - p.updateStatusProgressing() + if err := p.updateStatusProgressing(); err != nil { + klog.Errorf("failed to update status: %v", err) + } if err := p.syncMachineConfigPools(ctx, pools); err != nil { if errors.Is(err, context.DeadlineExceeded) { ctxErr := fmt.Errorf("requeue: prefetching images incomplete after: %v", p.prefetchTimeout) - p.updateStatusError(key, pools, ctxErr) + if err := p.updateStatusError(pools, ctxErr); err != nil { + klog.Errorf("failed to update status: %v", err) + } klog.Info(ctxErr) return ctxErr } klog.Error(err) - p.updateStatusError(key, pools, err) + if err := p.updateStatusError(pools, err); err != nil { + klog.Errorf("failed to update status: %v", err) + } return err } - p.updateStatusProgressingComplete(pools, "All pinned image sets complete") - return nil + return p.updateStatusProgressingComplete(pools, "All pinned image sets complete") } func (p *PinnedImageSetManager) syncMachineConfigPools(ctx context.Context, pools []*mcfgv1.MachineConfigPool) error { @@ -292,6 +296,8 @@ func (p *PinnedImageSetManager) syncMachineConfigPool(ctx context.Context, pool } imageSets = append(imageSets, imageSet) } + // images are cached with size information + p.cache.ClearDigests() return p.prefetchImageSets(ctx, imageSets...) } @@ -302,6 +308,11 @@ func (p *PinnedImageSetManager) checkNodeAllocatableStorage(ctx context.Context, return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) } + // check if the node is ready + if err := checkNodeReady(node); err != nil { + return err + } + storageCapacity, ok := node.Status.Allocatable[corev1.ResourceEphemeralStorage] if !ok { return fmt.Errorf("node %q has no ephemeral storage capacity", p.nodeName) @@ -426,7 +437,7 @@ func (p *PinnedImageSetManager) checkImagePayloadStorage(ctx context.Context, im return err } if exists { - // dont account for storage if the image already exists + // do not calculate storage if the image already exists. 
continue } @@ -441,7 +452,7 @@ func (p *PinnedImageSetManager) checkImagePayloadStorage(ctx context.Context, im klog.Warningf("corrupted cache entry for image %q, deleting", imageName) p.cache.Remove(imageName) } - size, err := getImageSize(ctx, imageName, p.authFilePath) + size, err := p.getImageSize(ctx, imageName, p.authFilePath) if err != nil { return err } @@ -453,7 +464,8 @@ func (p *PinnedImageSetManager) checkImagePayloadStorage(ctx context.Context, im requiredStorage += size * 2 } - if requiredStorage >= capacity-p.minStorageAvailableBytes.Value() { + minimumStorage := p.minStorageAvailableBytes.Value() + if requiredStorage >= capacity-minimumStorage { klog.Errorf("%v capacity=%d, required=%d", errInsufficientStorage, capacity, requiredStorage+p.minStorageAvailableBytes.Value()) return errInsufficientStorage } @@ -496,18 +508,18 @@ func ensureCrioPinnedImagesConfigFile(path string, imageNames []string) error { } return crioReload() } + klog.Infof("CRI-O config file is up to date, no reload required") return nil } -func (p *PinnedImageSetManager) updateStatusProgressing() { +func (p *PinnedImageSetManager) updateStatusProgressing() error { node, err := p.nodeLister.Get(p.nodeName) if err != nil { - klog.Errorf("Failed to get node %q: %v", p.nodeName, err) - return + return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) } - err = upgrademonitor.UpdateMachineConfigNodeStatus( + return upgrademonitor.UpdateMachineConfigNodeStatus( &upgrademonitor.Condition{ State: mcfgv1alpha1.MachineConfigNodePinnedImageSetsProgressing, Reason: "ImagePrefetch", @@ -522,22 +534,17 @@ func (p *PinnedImageSetManager) updateStatusProgressing() { nil, p.featureGatesAccessor, ) - if err != nil { - klog.Errorf("Failed to update machine config node: %v", err) - } } -func (p *PinnedImageSetManager) updateStatusProgressingComplete(pools []*mcfgv1.MachineConfigPool, message string) { +func (p *PinnedImageSetManager) updateStatusProgressingComplete(pools []*mcfgv1.MachineConfigPool, message string) error { node, err := p.nodeLister.Get(p.nodeName) if err != nil { - klog.Errorf("Failed to get node %q: %v", p.nodeName, err) - return + return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) } applyCfg, imageSets, err := getImageSetMetadata(p.imageSetLister, pools, nil) if err != nil { - klog.Errorf("Failed to get image set apply configs: %v", err) - return + return fmt.Errorf("failed to get image set apply configs: %w", err) } err = upgrademonitor.UpdateMachineConfigNodeStatus( @@ -560,7 +567,7 @@ func (p *PinnedImageSetManager) updateStatusProgressingComplete(pools []*mcfgv1. } // reset any degraded status - err = upgrademonitor.UpdateMachineConfigNodeStatus( + return upgrademonitor.UpdateMachineConfigNodeStatus( &upgrademonitor.Condition{ State: mcfgv1alpha1.MachineConfigNodePinnedImageSetsDegraded, Reason: "AsExpected", @@ -575,33 +582,28 @@ func (p *PinnedImageSetManager) updateStatusProgressingComplete(pools []*mcfgv1. 
nil, p.featureGatesAccessor, ) - if err != nil { - klog.Errorf("Failed to updated machine config node: %v", err) - } } -func (p *PinnedImageSetManager) updateStatusError(key interface{}, pools []*mcfgv1.MachineConfigPool, statusErr error) { +func (p *PinnedImageSetManager) updateStatusError(pools []*mcfgv1.MachineConfigPool, statusErr error) error { node, err := p.nodeLister.Get(p.nodeName) if err != nil { - klog.Errorf("Failed to get node %q: %v", p.nodeName, err) - return + return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) } applyCfg, imageSets, err := getImageSetMetadata(p.imageSetLister, pools, statusErr) if err != nil { - klog.Errorf("Failed to get image set apply configs: %v", err) - return + return fmt.Errorf("failed to get image set apply configs: %w", err) } var errMsg string - if p.queue.NumRequeues(key) >= degradeAfterRetries { - // degrade the pool after max retries + if isErrNoSpace(statusErr) { + // degrade the pool if there is no space errMsg = fmt.Sprintf("%s %v", degradeMessagePrefix, statusErr) } else { errMsg = statusErr.Error() } - err = upgrademonitor.UpdateMachineConfigNodeStatus( + return upgrademonitor.UpdateMachineConfigNodeStatus( &upgrademonitor.Condition{ State: mcfgv1alpha1.MachineConfigNodePinnedImageSetsDegraded, Reason: "PrefetchFailed", @@ -616,9 +618,19 @@ func (p *PinnedImageSetManager) updateStatusError(pools []*mcfg imageSets, p.featureGatesAccessor, ) - if err != nil { - klog.Errorf("Failed to updated machine config node: %v", err) +} +func checkNodeReady(node *corev1.Node) error { + for i := range node.Status.Conditions { + cond := &node.Status.Conditions[i] + if cond.Type == corev1.NodeReady && cond.Status != corev1.ConditionTrue { + return fmt.Errorf("node %s is reporting NotReady=%v", node.Name, cond.Status) + } + if cond.Type == corev1.NodeDiskPressure && cond.Status != corev1.ConditionFalse { + return fmt.Errorf("node %s is reporting DiskPressure=%v", node.Name, cond.Status) + } } + return nil } // getImageSetMetadata populates observed pinned image set generation details for node level status updates to MachineConfigNode. @@ -697,6 +709,10 @@ func (p *PinnedImageSetManager) cancelWorkload(reason string) { // prefetchWorker is a worker that pulls images from the container runtime. 
func (p *PinnedImageSetManager) prefetchWorker(ctx context.Context) { for task := range p.prefetchCh { + if task.monitor.Drain() { + task.monitor.Done() + continue + } if err := ensurePullImage(ctx, p.criClient, p.backoff, task.image, task.auth); err != nil { task.monitor.Error(err) klog.Warningf("failed to prefetch image %q: %v", task.image, err) @@ -1008,6 +1024,41 @@ func (p *PinnedImageSetManager) handleErr(err error, key interface{}) { p.queue.AddAfter(key, 1*time.Minute) } +func (p *PinnedImageSetManager) getImageSize(ctx context.Context, imageName, authFilePath string) (int64, error) { + args := []string{ + "manifest", + "--log-level", "error", // suppress warn log output + "inspect", + "--authfile", authFilePath, + imageName, + } + + output, err := exec.CommandContext(ctx, "podman", args...).CombinedOutput() + if err != nil && strings.Contains(err.Error(), "manifest unknown") { + return 0, errNotFound + } + if err != nil { + return 0, fmt.Errorf("failed to execute podman manifest inspect for %q: %w", imageName, err) + } + + var manifest ocispec.Manifest + err = json.Unmarshal(output, &manifest) + if err != nil { + return 0, fmt.Errorf("failed to unmarshal manifest for %q: %w", imageName, err) + } + + var totalSize int64 + for _, layer := range manifest.Layers { + if p.cache.HasDigest(layer.Digest.String()) { + continue + } + totalSize += layer.Size + p.cache.AddDigest(layer.Digest.String()) + } + + return totalSize, nil +} + // ensurePullImage first checks if the image exists locally and then will attempt to pull // the image from the container runtime with a retry/backoff. func ensurePullImage(ctx context.Context, client *cri.Client, backoff wait.Backoff, image string, authConfig *runtimeapi.AuthConfig) error { @@ -1046,14 +1097,13 @@ func ensurePullImage(ctx context.Context, client *cri.Client, backoff wait.Backo } func isErrNoSpace(err error) bool { - if err == syscall.ENOSPC { + if errors.Is(err, syscall.ENOSPC) { return true } - if werr, ok := err.(*os.PathError); ok { - if werr.Err == syscall.ENOSPC { - return true - } + if strings.Contains(err.Error(), "no space left on device") { + return true } + return false } @@ -1147,14 +1197,37 @@ func isNodeInPool(nodes []*corev1.Node, nodeName string) bool { type imageCache struct { mu sync.RWMutex cache *lru.Cache + // digestSeen is used to track if a digest has been seen before. This is + // used to avoid calculating the size of the same blob multiple times. 
+ digestSeen map[string]struct{} } func newImageCache(maxEntries int) *imageCache { return &imageCache{ - cache: lru.New(maxEntries), + cache: lru.New(maxEntries), + digestSeen: make(map[string]struct{}), } } +func (c *imageCache) HasDigest(digest string) bool { + c.mu.RLock() + defer c.mu.RUnlock() + _, seen := c.digestSeen[digest] + return seen +} + +func (c *imageCache) AddDigest(digest string) { + c.mu.Lock() + c.digestSeen[digest] = struct{}{} + c.mu.Unlock() +} + +func (c *imageCache) ClearDigests() { + c.mu.Lock() + c.digestSeen = make(map[string]struct{}) + c.mu.Unlock() +} + func (c *imageCache) Add(key, value interface{}) { c.mu.Lock() c.cache.Add(key, value) @@ -1170,6 +1243,7 @@ func (c *imageCache) Get(key interface{}) (value interface{}, ok bool) { func (c *imageCache) Clear() { c.mu.Lock() c.cache.Clear() + c.digestSeen = make(map[string]struct{}) c.mu.Unlock() } @@ -1186,37 +1260,6 @@ type imageInfo struct { Pulled bool } -func getImageSize(ctx context.Context, imageName, authFilePath string) (int64, error) { - args := []string{ - "manifest", - "--log-level", "error", // suppress warn log output - "inspect", - "--authfile", authFilePath, - imageName, - } - - output, err := exec.CommandContext(ctx, "podman", args...).CombinedOutput() - if err != nil && strings.Contains(err.Error(), "manifest unknown") { - return 0, errNotFound - } - if err != nil { - return 0, fmt.Errorf("failed to execute podman manifest inspect for %q: %w", imageName, err) - } - - var manifest ocispec.Manifest - err = json.Unmarshal(output, &manifest) - if err != nil { - return 0, fmt.Errorf("failed to unmarshal manifest for %q: %w", imageName, err) - } - - var totalSize int64 - for _, layer := range manifest.Layers { - totalSize += layer.Size - } - - return totalSize, nil -} - func triggerPinnedImageSetChange(old, new *mcfgv1alpha1.PinnedImageSet) bool { if old.DeletionTimestamp != new.DeletionTimestamp { return true @@ -1354,20 +1397,20 @@ type prefetch struct { // prefetchMonitor is used to monitor the status of prefetch operations. type prefetchMonitor struct { - once sync.Once - errFn func(error) - err error wg sync.WaitGroup + mu sync.RWMutex + drain bool + err error } func newPrefetchMonitor() *prefetchMonitor { - m := &prefetchMonitor{} - m.errFn = func(err error) { - m.once.Do(func() { - m.err = err - }) - } - return m + return &prefetchMonitor{} +} + +func (m *prefetchMonitor) Drain() bool { + m.mu.RLock() + defer m.mu.RUnlock() + return m.drain } // Add increments the number of prefetch operations the monitor is waiting for. @@ -1375,15 +1418,14 @@ func (m *prefetchMonitor) Add(i int) { m.wg.Add(i) } -func (m *prefetchMonitor) finalize(err error) { - m.once.Do(func() { - m.err = err - }) -} - // Error is called when an error occurs during prefetching. func (m *prefetchMonitor) Error(err error) { - m.finalize(err) + m.mu.Lock() + if isErrNoSpace(err) { + m.drain = true + } + m.err = err + m.mu.Unlock() } // Done decrements the number of prefetch operations the monitor is waiting for. 
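The prefetchMonitor rewrite above replaces the sync.Once error latch with a mutex-guarded drain flag: once a worker reports an out-of-space error, every remaining queued task is still acknowledged but no longer executed, so a long queue of doomed pulls fails fast instead of one slow pull at a time. The following is a minimal, self-contained sketch of that pattern, not the patch's code; the monitor type, task slice, and names are illustrative stand-ins for prefetchMonitor and the prefetch channel.

package main

import (
	"errors"
	"fmt"
	"sync"
	"syscall"
)

// monitor records the last error and flips a drain flag on fatal
// (out of space) errors so remaining work is skipped.
type monitor struct {
	mu    sync.RWMutex
	wg    sync.WaitGroup
	drain bool
	err   error
}

func (m *monitor) Error(err error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if errors.Is(err, syscall.ENOSPC) {
		m.drain = true // fatal: tell remaining workers to skip their tasks
	}
	m.err = err
}

func (m *monitor) Drain() bool {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.drain
}

func main() {
	m := &monitor{}
	results := []error{nil, syscall.ENOSPC, nil, nil} // simulated pull outcomes
	m.wg.Add(len(results))
	go func() {
		for _, result := range results {
			if m.Drain() {
				m.wg.Done() // still account for skipped tasks so Wait returns
				continue
			}
			if result != nil {
				m.Error(result)
			}
			m.wg.Done()
		}
	}()
	m.wg.Wait()
	fmt.Println("drained:", m.Drain(), "err:", m.err)
}

The key detail the patch relies on is that skipped tasks still call Done; otherwise the WaitGroup would never unblock after a drain.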
diff --git a/pkg/daemon/pinned_image_set_test.go b/pkg/daemon/pinned_image_set_test.go index f5082fa857..a53bd211bb 100644 --- a/pkg/daemon/pinned_image_set_test.go +++ b/pkg/daemon/pinned_image_set_test.go @@ -7,6 +7,7 @@ import ( "os" "path/filepath" "strings" + "syscall" "testing" "time" @@ -151,6 +152,7 @@ func TestPrefetchImageSets(t *testing.T) { wantValidTarget bool wantErr error wantNotFound bool + wantNoSpace bool wantPulledImages int }{ { @@ -261,6 +263,27 @@ func TestPrefetchImageSets(t *testing.T) { registryAvailableImages: nil, wantErr: os.ErrNotExist, }, + { + name: "out of space should drain and return out of space error quickly", + authFilePath: authFilePath, + machineConfigPools: []runtime.Object{ + fakeWorkerPoolPinnedImageSets, + }, + nodes: []runtime.Object{ + fakeStableStorageWorkerNode, + }, + wantValidTarget: true, + imageSets: []runtime.Object{ + &mcfgv1alpha1.PinnedImageSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-pinned-images", + }, + Spec: generateImageSetSpec(outOfDiskImage, 300), + }, + }, + registryAvailableImages: nil, + wantNoSpace: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -350,6 +373,10 @@ func TestPrefetchImageSets(t *testing.T) { require.ErrorIs(err, tt.wantErr) return } + if tt.wantNoSpace { + require.True(isErrNoSpace(err)) + return + } require.NoError(err) require.Equal(tt.wantPulledImages, runtime.imagesPulled()) }) @@ -427,6 +454,85 @@ func TestPinnedImageSetAddEventHandlers(t *testing.T) { } } +func TestCheckNodeAllocatableStorage(t *testing.T) { + require := require.New(t) + tests := []struct { + name string + node runtime.Object + pinnedImageSet *mcfgv1alpha1.PinnedImageSet + // uncompressed is x 2 + imagesSizeCompressed string + minFreeStorageAfterPrefetch string + wantErr error + }{ + { + name: "happy path", + node: fakeNode([]string{"worker"}, "25Gi"), + pinnedImageSet: workerPis, + imagesSizeCompressed: "5Gi", + minFreeStorageAfterPrefetch: "10Gi", + }, + { + name: "not enough storage", + node: fakeNode([]string{"worker"}, "25Gi"), + pinnedImageSet: workerPis, + imagesSizeCompressed: "10Gi", + minFreeStorageAfterPrefetch: "10Gi", + wantErr: errInsufficientStorage, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fakeClient := fake.NewSimpleClientset(tt.node) + informerFactory := informers.NewSharedInformerFactory(fakeClient, noResyncPeriodFunc()) + nodeInformer := informerFactory.Core().V1().Nodes() + + informerFactory.Start(ctx.Done()) + informerFactory.WaitForCacheSync(ctx.Done()) + + err := nodeInformer.Informer().GetIndexer().Add(tt.node) + require.NoError(err) + + // setup fake cri runtime with client + runtime := newFakeRuntime([]string{availableImage}, []string{}) + listener, err := newTestListener() + require.NoError(err) + err = runtime.Start(listener) + require.NoError(err) + defer runtime.Stop() + runtimeEndpoint := listener.Addr().String() + + criClient, err := cri.NewClient(ctx, runtimeEndpoint) + require.NoError(err) + + node := tt.node.(*corev1.Node) + p := &PinnedImageSetManager{ + nodeName: node.Name, + cache: newImageCache(10), + nodeLister: nodeInformer.Lister(), + criClient: criClient, + minStorageAvailableBytes: resource.MustParse(tt.minFreeStorageAfterPrefetch), + } + + imageSize := resource.MustParse(tt.imagesSizeCompressed) + size, ok := imageSize.AsInt64() + require.True(ok) + + // populate the cache with the pinned image set image size + p.cache.Add("image1", 
imageInfo{Name: "image1", Size: size}) + + err = p.checkNodeAllocatableStorage(ctx, tt.pinnedImageSet) + if tt.wantErr != nil { + require.ErrorIs(err, tt.wantErr) + return + } + require.NoError(err) + }) + } +} + func TestEnsureCrioPinnedImagesConfigFile(t *testing.T) { require := require.New(t) tmpDir := t.TempDir() @@ -472,10 +578,24 @@ var ( fakeWorkerPoolPinnedImageSets = fakeMachineConfigPool("worker", []mcfgv1.PinnedImageSetRef{{Name: "worker-set"}}) fakeWorkerPoolNoPinnedImageSets = fakeMachineConfigPool("worker", nil) availableImage = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0000000000000000000000000000000000000000000000000000000000000000" + outOfDiskImage = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1111111111111111111111111111111111111111111111111111111111111111" // slowImage is a fake image that is used to block the image puller. This simulates a slow image pull of 1 seconds. slowImage = "quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc" ) +func generateImageSetSpec(imageName string, count int) mcfgv1alpha1.PinnedImageSetSpec { + pinnedImages := make([]mcfgv1alpha1.PinnedImageRef, count) + for i := 0; i < count; i++ { + pinnedImages[i] = mcfgv1alpha1.PinnedImageRef{ + Name: imageName, + } + } + + return mcfgv1alpha1.PinnedImageSetSpec{ + PinnedImages: pinnedImages, + } +} + func fakeNode(roles []string, allocatableStorage string) *corev1.Node { labels := map[string]string{} for _, role := range roles { @@ -489,7 +609,7 @@ func fakeNode(roles []string, allocatableStorage string) *corev1.Node { Labels: labels, }, Status: corev1.NodeStatus{ - Capacity: corev1.ResourceList{ + Allocatable: corev1.ResourceList{ corev1.ResourceStorage: resource.MustParse(allocatableStorage), corev1.ResourceEphemeralStorage: resource.MustParse(allocatableStorage), }, @@ -591,7 +711,10 @@ func (*FakeRuntime) ListImages(context.Context, *runtimeapi.ListImagesRequest) ( // PullImage implements v1.ImageServiceServer. func (r *FakeRuntime) PullImage(ctx context.Context, req *runtimeapi.PullImageRequest) (*runtimeapi.PullImageResponse, error) { - if req.Image.Image == slowImage { + switch req.Image.Image { + case outOfDiskImage: + return nil, fmt.Errorf("writing blob: storing blob to file \"/var/tmp/container_images_storage708850218/3\": write /var/tmp/container_images_storage708850218/3: %v", syscall.ENOSPC) + case slowImage: time.Sleep(1 * time.Second) } From 8a5f52cd59af20a61f679fd74db6622097e831ee Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Thu, 25 Apr 2024 15:13:12 -0400 Subject: [PATCH 16/18] daemon/pinnedimagesets: retry if local node is not found by informer Signed-off-by: Sam Batschelet --- pkg/daemon/pinned_image_set.go | 37 ++++++++++++++++++++++++----- pkg/daemon/pinned_image_set_test.go | 2 +- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/pkg/daemon/pinned_image_set.go b/pkg/daemon/pinned_image_set.go index a188bf8fca..4f5ef2381d 100644 --- a/pkg/daemon/pinned_image_set.go +++ b/pkg/daemon/pinned_image_set.go @@ -79,6 +79,7 @@ var ( errInsufficientStorage = errors.New("storage available is less than minimum required") errFailedToPullImage = errors.New("failed to pull image") errNotFound = errors.New("not found") + errRequeueAfterTimeout = errors.New("requeue: prefetching images incomplete after timeout") ) // PinnedImageSetManager manages the prefetching of images. 
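Declaring the timeout as a wrapped sentinel here, rather than building a one-off fmt.Errorf string at the call site as before, lets callers detect the requeue condition with errors.Is even after the concrete deadline is appended. A short sketch of the %w wrapping pattern, with illustrative function names:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errRequeueAfterTimeout = errors.New("requeue: prefetching images incomplete after timeout")

// wrapTimeout annotates the sentinel with the concrete deadline while
// keeping it matchable via errors.Is, because %w preserves the chain.
func wrapTimeout(timeout time.Duration) error {
	return fmt.Errorf("%w: %v", errRequeueAfterTimeout, timeout)
}

func main() {
	err := wrapTimeout(2 * time.Minute)
	fmt.Println(errors.Is(err, errRequeueAfterTimeout)) // true
	fmt.Println(err)
}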
@@ -193,7 +194,7 @@ func NewPinnedImageSetManager( func (p *PinnedImageSetManager) sync(key string) error { klog.V(4).Infof("Syncing MachineConfigPool %q", key) - node, err := p.nodeLister.Get(p.nodeName) + node, err := p.getNodeWithRetry(p.nodeName) if err != nil { return fmt.Errorf("failed to get node %q: %w", p.nodeName, err) } @@ -216,7 +217,7 @@ func (p *PinnedImageSetManager) sync(key string) error { if err := p.syncMachineConfigPools(ctx, pools); err != nil { if errors.Is(err, context.DeadlineExceeded) { - ctxErr := fmt.Errorf("requeue: prefetching images incomplete after: %v", p.prefetchTimeout) + ctxErr := fmt.Errorf("%w: %v", errRequeueAfterTimeout, p.prefetchTimeout) if err := p.updateStatusError(pools, ctxErr); err != nil { klog.Errorf("failed to update status: %v", err) } @@ -377,6 +378,9 @@ func (p *PinnedImageSetManager) scheduleWork(ctx context.Context, prefetchCh cha } scheduledImages := 0 for _, imageRef := range prefetchImages { + if monitor.Drain() { + continue + } select { case <-ctx.Done(): return ctx.Err() @@ -669,7 +673,7 @@ func getImageSetMetadata(imageSetLister mcfglistersv1alpha1.PinnedImageSetLister // getWorkerCount returns the number of workers to use for prefetching images. func (p *PinnedImageSetManager) getWorkerCount() (int, error) { - node, err := p.nodeLister.Get(p.nodeName) + node, err := p.getNodeWithRetry(p.nodeName) if err != nil { return 0, fmt.Errorf("failed to get node %q: %w", p.nodeName, err) } @@ -788,7 +792,7 @@ func (p *PinnedImageSetManager) addPinnedImageSet(obj interface{}) { return } - node, err := p.nodeLister.Get(p.nodeName) + node, err := p.getNodeWithRetry(p.nodeName) if err != nil { klog.Errorf("failed to get node %q: %v", p.nodeName, err) return @@ -827,7 +831,7 @@ func (p *PinnedImageSetManager) deletePinnedImageSet(obj interface{}) { } } - node, err := p.nodeLister.Get(p.nodeName) + node, err := p.getNodeWithRetry(p.nodeName) if err != nil { klog.Errorf("failed to get node %q: %v", p.nodeName, err) return @@ -851,6 +855,27 @@ func (p *PinnedImageSetManager) deletePinnedImageSet(obj interface{}) { } } +// getNodeWithRetry gets the node with retries. This avoids some races when the local node +// is new but not found during startup. 
+func (p *PinnedImageSetManager) getNodeWithRetry(nodeName string) (*corev1.Node, + error) { + var node *corev1.Node + err := wait.ExponentialBackoff(p.backoff, func() (bool, error) { + var err error + node, err = p.nodeLister.Get(nodeName) + if err != nil { + if apierrors.IsNotFound(err) { + // log warning and retry because we are tolerating unexpected behavior from the informer + klog.Warningf("Node %q not found, retrying", nodeName) + return false, nil + } + return false, err + } + return true, nil + }) + return node, err +} + func (p *PinnedImageSetManager) updatePinnedImageSet(oldObj, newObj interface{}) { oldImageSet := oldObj.(*mcfgv1alpha1.PinnedImageSet) newImageSet := newObj.(*mcfgv1alpha1.PinnedImageSet) @@ -860,7 +885,7 @@ func (p *PinnedImageSetManager) updatePinnedImageSet(oldObj, newObj interface{}) return } - node, err := p.nodeLister.Get(p.nodeName) + node, err := p.getNodeWithRetry(p.nodeName) if err != nil { klog.Errorf("failed to get node %q: %v", p.nodeName, err) return diff --git a/pkg/daemon/pinned_image_set_test.go b/pkg/daemon/pinned_image_set_test.go index a53bd211bb..599ef5d213 100644 --- a/pkg/daemon/pinned_image_set_test.go +++ b/pkg/daemon/pinned_image_set_test.go @@ -348,7 +348,7 @@ func TestPrefetchImageSets(t *testing.T) { nodeListerSynced: nodeInformer.Informer().HasSynced, prefetchCh: make(chan prefetch, defaultPrefetchWorkers*2), backoff: wait.Backoff{ - Steps: 1, + Steps: maxRetries, Duration: 10 * time.Millisecond, Factor: retryFactor, Cap: 10 * time.Millisecond, From 836dd8dac38c50a5bcd58b5cb665d71efca43351 Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Thu, 25 Apr 2024 19:06:10 -0400 Subject: [PATCH 17/18] daemon/pinnedimagesets: ensure sync on first boot Signed-off-by: Sam Batschelet --- pkg/daemon/pinned_image_set.go | 44 ++++++++++++++++++++++++++++- pkg/daemon/pinned_image_set_test.go | 6 ++++ 2 files changed, 49 insertions(+), 1 deletion(-) diff --git a/pkg/daemon/pinned_image_set.go b/pkg/daemon/pinned_image_set.go index 4f5ef2381d..ee43a9886b 100644 --- a/pkg/daemon/pinned_image_set.go +++ b/pkg/daemon/pinned_image_set.go @@ -123,9 +123,12 @@ type PinnedImageSetManager struct { queue workqueue.RateLimitingInterface featureGatesAccessor featuregates.FeatureGateAccess - // mutex to protect the cancelFn + // mutex protects cancelFn mu sync.Mutex cancelFn context.CancelFunc + + once sync.Once + bootstrapped bool } // NewPinnedImageSetManager creates a new pinned image set manager. 
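getNodeWithRetry above treats a NotFound from the informer cache as transient and retries with exponential backoff, while any other error remains terminal. A condensed sketch of that retry shape follows, assuming k8s.io/apimachinery is on the module path; the lookup closure and backoff values are illustrative stand-ins for the node lister and p.backoff.

package main

import (
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

var errNotFound = errors.New("not found")

// getWithRetry retries only when the lookup reports not-found, on the
// assumption that a freshly started informer may simply be behind.
func getWithRetry(lookup func() (string, error)) (string, error) {
	var value string
	backoff := wait.Backoff{Steps: 5, Duration: 100 * time.Millisecond, Factor: 2.0}
	err := wait.ExponentialBackoff(backoff, func() (bool, error) {
		v, err := lookup()
		if err != nil {
			if errors.Is(err, errNotFound) {
				return false, nil // retry: cache may not have synced yet
			}
			return false, err // terminal: propagate immediately
		}
		value = v
		return true, nil
	})
	return value, err
}

func main() {
	calls := 0
	node, err := getWithRetry(func() (string, error) {
		calls++
		if calls < 3 {
			return "", errNotFound
		}
		return "worker-0", nil
	})
	fmt.Println(node, err, "after", calls, "calls")
}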
@@ -175,6 +178,11 @@ func NewPinnedImageSetManager( DeleteFunc: p.deleteMachineConfigPool, }) + nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: p.handleNodeEvent, + UpdateFunc: func(oldObj, newObj interface{}) { p.handleNodeEvent(newObj) }, + }) + p.syncHandler = p.sync p.enqueueMachineConfigPool = p.enqueue @@ -912,6 +920,40 @@ func (p *PinnedImageSetManager) updatePinnedImageSet(oldObj, newObj interface{}) } } +func (p *PinnedImageSetManager) handleNodeEvent(newObj interface{}) { + newNode := newObj.(*corev1.Node) + if newNode.Name != p.nodeName { + return + } + + pools, _, err := helpers.GetPoolsForNode(p.mcpLister, newNode) + if err != nil { + klog.Errorf("error finding pools for node %s: %v", newNode.Name, err) + return + } + if pools == nil { + return + } + + // handle first sync during startup + if !p.isBootstrapped() { + for _, pool := range pools { + p.enqueueMachineConfigPool(pool) + } + } + p.setBootstrapped() +} + +func (p *PinnedImageSetManager) isBootstrapped() bool { + return p.bootstrapped +} + +func (p *PinnedImageSetManager) setBootstrapped() { + defer p.once.Do(func() { + p.bootstrapped = true + }) +} + func (p *PinnedImageSetManager) addMachineConfigPool(obj interface{}) { pool := obj.(*mcfgv1.MachineConfigPool) if pool.DeletionTimestamp != nil { diff --git a/pkg/daemon/pinned_image_set_test.go b/pkg/daemon/pinned_image_set_test.go index 599ef5d213..7b8e577b99 100644 --- a/pkg/daemon/pinned_image_set_test.go +++ b/pkg/daemon/pinned_image_set_test.go @@ -446,6 +446,12 @@ func TestPinnedImageSetAddEventHandlers(t *testing.T) { nodeLister: nodeInformer.Lister(), mcpLister: mcpInformer.Lister(), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pinned-image-set-manager"), + backoff: wait.Backoff{ + Steps: maxRetries, + Duration: 10 * time.Millisecond, + Factor: retryFactor, + Cap: 10 * time.Millisecond, + }, } p.enqueueMachineConfigPool = p.enqueue p.addPinnedImageSet(tt.currentImageSet) From fd2c36e11f6b9892683001e34c7d26b8a59a4bd2 Mon Sep 17 00:00:00 2001 From: Sam Batschelet Date: Fri, 26 Apr 2024 11:49:41 -0400 Subject: [PATCH 18/18] daemon/pinned-image-set: ensure cleanup of crio config file on delete Signed-off-by: Sam Batschelet --- pkg/daemon/pinned_image_set.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pkg/daemon/pinned_image_set.go b/pkg/daemon/pinned_image_set.go index ee43a9886b..759b83bcbf 100644 --- a/pkg/daemon/pinned_image_set.go +++ b/pkg/daemon/pinned_image_set.go @@ -273,7 +273,7 @@ func (p *PinnedImageSetManager) syncMachineConfigPools(ctx context.Context, pool } if !exists { p.cache.Clear() - return errFailedToPullImage + return fmt.Errorf("%w: image removed during sync: %s", errFailedToPullImage, image) } } @@ -497,6 +497,8 @@ func ensureCrioPinnedImagesConfigFile(path string, imageNames []string) error { return fmt.Errorf("failed to remove CRI-O config file: %w", err) } return crioReload() + } else if len(imageNames) == 0 { + return nil } var existingCfgBytes []byte @@ -1180,6 +1182,10 @@ func uniqueSortedImageNames(images []mcfgv1alpha1.PinnedImageRef) []string { for _, image := range images { if _, ok := seen[image.Name]; !ok { + trimmedName := strings.TrimSpace(image.Name) + if trimmedName == "" { + continue + } seen[image.Name] = struct{}{} unique = append(unique, image.Name) }